import sys
import os
import json
# date and time
from datetime import datetime, timedelta
from email.utils import parsedate_tz
from dateutil import tz
import time
from api_extractor_config import DATETIME_FORMAT
def load_credentials(access):
credentials = {}
if access == 'AgProCanada_TableauDEV':
credentials = {
'MSSQL_HOST': os.environ['PYMSSQL_HOST'],
'MSSQL_DB': os.environ['PYMSSQL_DB'],
'MSSQL_USER': os.environ['PYMSSQL_USERNAME'],
'MSSQL_PASS': os.environ['PYMSSQL_PASS'],
'MSSQL_PORT': int(os.environ['PYMSSQL_PORT']),
'MSSQL_DRIVER': os.environ['PYMSSQL_DRIVER']
}
elif access == 'Youtube_API':
credentials = os.environ['YOUTUBE_API_CRED']
elif access == 'GA_API':
credentials = os.environ['GA_API_CRED']
elif access == 'Twitter_API':
credentials = {
"consumer_key": os.environ['TWITTER_CONSUMER_KEY'],
"consumer_secret": os.environ['TWITTER_CONSUMER_SECRET'],
"access_token_key": os.environ['TWITTER_ACCESS_TOKEN_KEY'],
"access_token_secret": os.environ['TWITTER_ACCESS_TOKEN_SECRET']
}
return credentials
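# Illustrative usage (not part of the original module; assumes the corresponding
# environment variables are set):
#
#   twitter_creds = load_credentials('Twitter_API')
#   mssql_creds = load_credentials('AgProCanada_TableauDEV')
#
# Unknown access names simply return an empty dict.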
def log(s):
timestamp = datetime.now().strftime(DATETIME_FORMAT)
print('> [%s]: %s' % (timestamp, s))
def remove_dups(l):
"""Remove duplcates from a list"""
return list(set(l))
def file_to_str(file_relative_path):
with open(file_relative_path, 'r') as file:
return file.read()
def str_to_datetime(datestring):
"""
String should be RFC822 compliant. Eg. 'Tue Mar 29 08:11:25 +0000 2011'
Used for twitter API dates
https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
"""
time_tuple = parsedate_tz(datestring.strip())
dt = datetime(*time_tuple[:6]) - timedelta(seconds=time_tuple[-1])
return dt
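# Illustrative example (not part of the original module): a Twitter 'created_at'
# value such as 'Tue Mar 29 08:11:25 +0000 2011' is converted to a naive
# datetime in UTC, e.g.
#
#   str_to_datetime('Tue Mar 29 08:11:25 +0000 2011')
#   # -> datetime.datetime(2011, 3, 29, 8, 11, 25)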
def utc_to_eastern(utc_dt):
"""
Convert a datetime object in UTC to one in the Eastern time zone.
The utc_dt can be 'naive' (meaning that it does not have tzinfo)
"""
eastern = tz.gettz('America/New_York')  # 'America/Eastern' is not a valid IANA zone name
utc_dt = utc_dt.replace(tzinfo=tz.tzutc())
return utc_dt.astimezone(eastern)
def time_func(func, params):
"""
Time how long it takes to run a function.
"""
t0 = time.time()
return_val = func(*params)
t1 = time.time()
log("'%s' took %.3f seconds to run." % (func.__name__, t1 - t0))
return return_val | python |
import json
BATCH_SIZE = 128
RNN_SIZE = 128
EMBED_SIZE = 128
LEARNING_RATE = 0.001
KEEP_PROB = 0.75
EPOCHS = 500
DISPLAY_STEP = 30
MODEL_DIR = 'Saved_Model_Weights'
SAVE_PATH = 'model_saver'
MIN_LEARNING_RATE = 0.01
LEARNING_RATE_DECAY = 0.9
| python |
#!/usr/bin/env python
from __future__ import print_function
import cProfile
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import swn
def stats():
grouperLabels = ['Random',
'Min Dist Stars',
'Max Dist Stars',
'1/4 Min Dist Stars',
'1/3 Min Dist Stars',
'1/2 Min Dist Stars',
'Link Most Isolated Group',
'Link Smallest Group',
'Link Largest Group']
# Queue for returning counts
q = mp.Queue()
# Create processes
pList = list()
for gType in xrange(9):
p = mp.Process(target=statsgen,args=(q,gType))
pList.append(p)
p.start()
# Join processes
countsList = list()
for gType in xrange(9):
print('Grouper Method ' + str(gType))
pList[gType].join()
countsList.append(q.get())
# Plot statistics
font = {'size' : 8}
plt.rc('font', **font)
plt.figure(figsize=(8,10))
for gType in xrange(9):
plt.subplot(3,3,countsList[gType][0]+1)
plt.title(str(countsList[gType][0]) + ' - ' + grouperLabels[countsList[gType][0]],fontsize=8)
plt.imshow(countsList[gType][1])
plt.savefig('groupingStats.png')
def statsgen(q,gType):
# Define statistics
counts = np.zeros([21,16])
numSectors = 1000
# Generate sectors
for i in xrange(numSectors):
# Create generator
gen = swn.generator.Generator()
# Generate sector
sec = gen.sector(gType)
# Calculate statistics
for s in sec.system_hex_list():
if (s[1] % 2 == 0):
counts[s[0]*2, s[1]*2] += 1.0
counts[s[0]*2, s[1]*2+1] += 1.0
counts[s[0]*2+1,s[1]*2] += 1.0
counts[s[0]*2+1,s[1]*2+1] += 1.0
else:
counts[s[0]*2+1,s[1]*2] += 1.0
counts[s[0]*2+1,s[1]*2+1] += 1.0
counts[s[0]*2+2,s[1]*2] += 1.0
counts[s[0]*2+2,s[1]*2+1] += 1.0
q.put((gType,counts))
def gen(gType=1):
# Create generator
gen = swn.generator.Generator()
# Set seed
gen.set_seed('Bipiw')
# Print seed
#print(gen.seed)
# Generate sector
sec = gen.sector(gType)
# Print sector map
#sec.print_sector_map()
# Print system orbit maps
sec.print_orbit_maps()
# Print sector info
#sec.print_sector_info()
# Print sector corporations
#sec.print_corporations()
# Print sector religions
#sec.print_religions()
# Create sector images
sec.update_images()
# Draw sector images
sec.draw_sector()
# Save sector images
sec.images.save_sector_map('test/testmap.png')
sec.images.save_sector_info('test/testinfo.png')
sec.images.save_sector_orbits('test/map.png')
if __name__ == '__main__':
gen()
#stats()
#runStats = cProfile.run('gen()', sort='cumtime') | python |
from .abstract_conjunction import AbstractConjunction
from .condition_type import ConditionType
class OrConjunction(AbstractConjunction):
def __init__(self, conditions):
super().__init__(type_=ConditionType.OR.value, conditions=conditions)
| python |
import socket
from enum import IntEnum
import json
import argparse
# Enum of available commands
class Command(IntEnum):
Undefined = 1
SafeModeEnable = 2
SafeModeDisable = 3
ShowNumCommands = 4
ShowNumSafeModes = 5
ShowUpTime = 6
ResetCommandCounter = 7
Shutdown = 8
MAX_COMMAND_NUM = 9
# default IP address to connect to
ADDRESS = '127.0.0.1'
# default port to connect to
PORT = 8080
# user prompt to request input
PROMPT = (
"\n"
"invalid: " + str(int(Command.Undefined)) + "\n"
"safe mode enable: " + str(int(Command.SafeModeEnable)) + "\n"
"safe mode disable: " + str(int(Command.SafeModeDisable)) + "\n"
"show number of commands received: " + str(int(Command.ShowNumCommands)) + "\n"
"show number of safe modes: " + str(int(Command.ShowNumSafeModes)) + "\n"
"show up time: " + str(int(Command.ShowUpTime)) + "\n"
"reset command counter: "+ str(int(Command.ResetCommandCounter)) + "\n"
"shutdown: " + str(int(Command.Shutdown)) + "\n"
"\n"
"So... what will it be, boss?\n"
"Type a number: "
)
# check if a string is an int
def IsInt(s):
try:
int(s)
return True
except ValueError:
return False
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-a", type=str, help="IP address to connect to")
parser.add_argument("-p", type=int, help="Port to connect to")
args = parser.parse_args()
if args.a:
ADDRESS = args.a
if args.p:
PORT = args.p
# connect to server and issue commands
print("Just wait a hot second, my dude.")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((ADDRESS, PORT))
print("Alright, alright, cool. Connection established. YOU'RE IN!")
while True:
command = input(PROMPT)
if (IsInt(command) and (int(command) >= 1) and (int(command) <= MAX_COMMAND_NUM)):  # valid commands start at 1; 0 would make Command(int(command)) raise below
s.sendall(bytes(command, 'utf-8'))
data = s.recv(1024).decode("utf-8")
data = json.loads(data)
print("\nServer says:")
for key in data:
print(key, '->', data[key])
print("")
if (Command(int(command)) == Command.Shutdown):
break
else:
print("\nHmm, no. Did I say that was an option?")
print("This is Bravo Six, going dark.")
| python |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateSessionTargetResourceDetails(object):
"""
Details about a bastion session's target resource.
"""
#: A constant which can be used with the session_type property of a CreateSessionTargetResourceDetails.
#: This constant has a value of "MANAGED_SSH"
SESSION_TYPE_MANAGED_SSH = "MANAGED_SSH"
#: A constant which can be used with the session_type property of a CreateSessionTargetResourceDetails.
#: This constant has a value of "PORT_FORWARDING"
SESSION_TYPE_PORT_FORWARDING = "PORT_FORWARDING"
def __init__(self, **kwargs):
"""
Initializes a new CreateSessionTargetResourceDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.bastion.models.CreateManagedSshSessionTargetResourceDetails`
* :class:`~oci.bastion.models.CreatePortForwardingSessionTargetResourceDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param session_type:
The value to assign to the session_type property of this CreateSessionTargetResourceDetails.
Allowed values for this property are: "MANAGED_SSH", "PORT_FORWARDING"
:type session_type: str
:param target_resource_port:
The value to assign to the target_resource_port property of this CreateSessionTargetResourceDetails.
:type target_resource_port: int
"""
self.swagger_types = {
'session_type': 'str',
'target_resource_port': 'int'
}
self.attribute_map = {
'session_type': 'sessionType',
'target_resource_port': 'targetResourcePort'
}
self._session_type = None
self._target_resource_port = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['sessionType']
if type == 'MANAGED_SSH':
return 'CreateManagedSshSessionTargetResourceDetails'
if type == 'PORT_FORWARDING':
return 'CreatePortForwardingSessionTargetResourceDetails'
else:
return 'CreateSessionTargetResourceDetails'
@property
def session_type(self):
"""
**[Required]** Gets the session_type of this CreateSessionTargetResourceDetails.
The session type.
Allowed values for this property are: "MANAGED_SSH", "PORT_FORWARDING"
:return: The session_type of this CreateSessionTargetResourceDetails.
:rtype: str
"""
return self._session_type
@session_type.setter
def session_type(self, session_type):
"""
Sets the session_type of this CreateSessionTargetResourceDetails.
The session type.
:param session_type: The session_type of this CreateSessionTargetResourceDetails.
:type: str
"""
allowed_values = ["MANAGED_SSH", "PORT_FORWARDING"]
if not value_allowed_none_or_none_sentinel(session_type, allowed_values):
raise ValueError(
"Invalid value for `session_type`, must be None or one of {0}"
.format(allowed_values)
)
self._session_type = session_type
@property
def target_resource_port(self):
"""
Gets the target_resource_port of this CreateSessionTargetResourceDetails.
The port number to connect to on the target resource.
:return: The target_resource_port of this CreateSessionTargetResourceDetails.
:rtype: int
"""
return self._target_resource_port
@target_resource_port.setter
def target_resource_port(self, target_resource_port):
"""
Sets the target_resource_port of this CreateSessionTargetResourceDetails.
The port number to connect to on the target resource.
:param target_resource_port: The target_resource_port of this CreateSessionTargetResourceDetails.
:type: int
"""
self._target_resource_port = target_resource_port
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| python |
from django.test import TestCase
from dfirtrack_config.filter_forms import AssignmentFilterForm
class AssignmentFilterFormTestCase(TestCase):
"""assignment filter form tests"""
def test_case_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['case'].label, 'Filter for case')
def test_case_form_empty_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['case'].empty_label, 'Filter for case')
def test_tag_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['tag'].label, 'Filter for tag')
def test_tag_form_empty_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['tag'].empty_label, 'Filter for tag')
def test_user_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['user'].label, 'Filter for user')
def test_user_form_empty_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['user'].empty_label, 'No user assigned')
def test_filter_assignment_view_keep_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(
form.fields['filter_assignment_view_keep'].label,
'Remember filter settings (confirm by applying)',
)
def test_assignment_filter_form_empty(self):
"""test minimum form requirements / VALID"""
# get object
form = AssignmentFilterForm(data={})
# compare
self.assertTrue(form.is_valid())
| python |
from guy import Guy,http
@http(r"/item/(\d+)")
def getItem(web,number):
web.write( "item %s"%number )
def test_hook_with_classic_fetch(runner):
class T(Guy):
__doc__="""Hello
<script>
async function testHook() {
var r=await window.fetch("/item/42")
return await r.text()
}
</script>
"""
async def init(self):
retour =await self.js.testHook()
self.exit(retour)
t=T()
retour=runner(t)
assert retour == "item 42"
def test_hook_with_guy_fetch(runner):
class T(Guy):
__doc__="""Hello
<script>
async function testHook() {
var r=await guy.fetch("/item/42") // not needed in that case (no cors trouble!)
return await r.text()
}
</script>
"""
async def init(self):
retour =await self.js.testHook()
self.exit(retour)
t=T()
retour=runner(t)
assert retour == "item 42"
| python |
'''Google Sheets Tools'''
import os
from pathlib import Path
import subprocess
import pandas as pd
def save_csv(url: str, save_path: Path, sheet_name: str, show_summary=False):
'''Download a data sheet from Google Sheets and save to csv file'''
sheet_url = f'{url}&sheet={sheet_name}'
subprocess.run(('wget', '-o', '/dev/null', '-O', str(save_path), sheet_url), check=True)
recordings = pd.read_csv(str(save_path))
if show_summary:
print(recordings.head())
def main():
env_var = 'GOOGLE_SHEETS_URL'
url = os.environ.get(env_var)
assert url, f'Invalid {env_var}'
csv_path = Path('/tmp/road_roughness.csv')
save_csv(url, csv_path, 'recordings', show_summary=True)
if __name__ == '__main__':
main()
| python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# flake8: noqa
from __future__ import absolute_import
from __future__ import print_function
import io
import os
from os import path
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
from setuptools.command.develop import develop
from setuptools.command.install import install
here = os.path.abspath(os.path.dirname(__file__))
def read(*names, **kwargs):
return io.open(
path.join(here, *names),
encoding=kwargs.get("encoding", "utf8")
).read()
long_description = read("README.md")
requirements = read("requirements.txt").split("\n")
optional_requirements = {}
conda_prefix = os.getenv('CONDA_PREFIX')
windows = os.name == 'nt'
def get_pybind_include():
if windows:
return os.path.join(conda_prefix, 'Library', 'include')
return os.path.join(conda_prefix, 'include')
def get_eigen_include():
if windows:
return os.path.join(conda_prefix, 'Library', 'include', 'eigen3')
return os.path.join(conda_prefix, 'include', 'eigen3')
def get_library_dirs():
if windows:
return os.path.join(conda_prefix, 'Library', 'lib')
return os.path.join(conda_prefix, 'lib')
ext_modules = [
Extension(
'compas_wood._wood',
sorted([
'src/clipper.cpp',
'src/connection_zones.cpp',
'src/xxx_interop_python.cpp'
]),
include_dirs=[
'./include',
get_eigen_include(),
get_pybind_include()
],
library_dirs=[
get_library_dirs(),
],
libraries=['mpfr', 'gmp'],
language='c++'
),
]
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
import os
with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f:
f.write('int main (int argc, char **argv) { return 0; }')
fname = f.name
try:
compiler.compile([fname], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
finally:
try:
os.remove(fname)
except OSError:
pass
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
The newer version is preferred over c++11 (when it is available).
"""
# flags = ['-std=c++17', '-std=c++14', '-std=c++11']
flags = ['-std=c++14', '-std=c++11']
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc', '/std:c++14'],
'unix': [],
}
l_opts = {
'msvc': [],
'unix': [],
}
# if sys.platform == 'darwin':
# darwin_opts = ['-stdlib=libc++', '-mmacosx-version-min=10.14']
# c_opts['unix'] += darwin_opts
# l_opts['unix'] += darwin_opts
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
link_opts = self.l_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
opts.append('-DCGAL_DEBUG=1')
for ext in self.extensions:
ext.define_macros = [('VERSION_INFO', '"{}"'.format(self.distribution.get_version()))]
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
build_ext.build_extensions(self)
setup(
name="compas_wood",
version="0.1.0",
description="Timber joinery generation based on CGAL library.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ibois-epfl/compas_wood",
author="petras vestartas",
author_email="[email protected]",
license="GPL-3 License",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: GPL-3 License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords=[],
project_urls={},
packages=["compas_wood"],
package_dir={"": "src"},
# package_data={},
# data_files=[],
# include_package_data=True,
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt},
setup_requires=['pybind11>=2.5.0'],
install_requires=requirements,
python_requires=">=3.6",
extras_require=optional_requirements,
zip_safe=False,
)
setup(
name="compas_wood",
version="0.1.0",
description="joinery generation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/IBOIS/compas_wood",
author="Petras Vestartas",
author_email="[email protected]",
license="MIT license",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords=[],
project_urls={},
packages=["compas_wood"],
package_dir={"": "src"},
package_data={},
data_files=[],
include_package_data=True,
zip_safe=False,
install_requires=requirements,
python_requires=">=3.6",
extras_require=optional_requirements,
entry_points={
"console_scripts": [],
},
ext_modules=[],
)
| python |
# Copyright (c) 2021 Alethea Katherine Flowers.
# Published under the standard MIT License.
# Full text available at: https://opensource.org/licenses/MIT
"""Helps create releases for Winterbloom stuff"""
import atexit
import collections
import datetime
import importlib.util
import mimetypes
import os
import os.path
import shutil
import tempfile
import webbrowser
import requests
from wintertools import git
GITHUB_API_TOKEN = os.environ["GITHUB_API_KEY"]
mimetypes.init()
class _Artifacts:
directory = tempfile.mkdtemp()
items = []
atexit.register(lambda: shutil.rmtree(_Artifacts.directory, ignore_errors=True))
def _import_config(root):
config_path = os.path.join(root, ".github", "releasing", "config.py")
spec = importlib.util.spec_from_file_location("release_config", config_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def _day_ordinal(day):
if 4 <= day <= 20 or 24 <= day <= 30:
return "th"
else:
return ["st", "nd", "rd"][day % 10 - 1]
def _git_info() -> dict:
info = {}
info["root"] = git.root()
info["repo"] = git.repo_name()
git.fetch_tags()
info["last_release"] = git.latest_tag()
# List of commits/changes since last version
changes = git.get_change_summary(info["last_release"], "HEAD")
# Arrange changes by category
categorized_changes = collections.defaultdict(list)
for change in changes:
if ": " in change:
category, change = change.split(": ", 1)
category = category.capitalize()
else:
category = "Other"
categorized_changes[category].append(change)
info["changes"] = categorized_changes
# Generate a new tag name
now = datetime.datetime.now()
info["tag"] = now.strftime(f"%Y.%m.{now.day}")
info["name"] = datetime.datetime.now().strftime(
f"%B {now.day}{_day_ordinal(now.day)}, %Y"
)
return info
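# For example, a release cut on June 3rd, 2021 gets the tag '2021.06.3' and the
# human-readable name 'June 3rd, 2021'.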
def _github_session():
session = requests.Session()
session.headers["Accept"] = "application/vnd.github.v3+json"
session.headers["Authorization"] = f"Bearer {GITHUB_API_TOKEN}"
return session
def _create_release(session, git_info, description):
url = f"https://api.github.com/repos/{git_info['repo']}/releases"
response = session.post(
url,
json={
"tag_name": git_info["tag"],
"target_commitish": "main",
"name": git_info["name"],
"body": description,
"draft": True,
},
)
response.raise_for_status()
return response.json()
def _upload_release_artifact(session, release, artifact):
content_type, _ = mimetypes.guess_type(artifact["path"])
if not content_type:
content_type = "application/octet-string"
with open(artifact["path"], "rb") as fh:
response = session.post(
release["upload_url"].split("{", 1)[0],
params={
"name": artifact["name"],
},
headers={"Content-Type": content_type},
data=fh.read(),
)
response.raise_for_status()
def add_artifact(src, name, **details):
if not details:
details = {}
dst = os.path.join(_Artifacts.directory, name)
shutil.copy(src, dst)
details["name"] = name
details["path"] = dst
_Artifacts.items.append(details)
def main():
git_info = _git_info()
print(f"Working from {git_info['root']}")
os.chdir(git_info["root"])
print(f"Tagging {git_info['tag']}...")
git.tag(git_info["tag"])
print("Preparing artifacts...")
config = _import_config(git_info["root"])
config.prepare_artifacts(git_info)
print("Preparing release description...")
description = config.prepare_description(git_info, _Artifacts.items)
description = git.open_editor(description)
print("Creating release...")
gh = _github_session()
release = _create_release(gh, git_info, description)
for artifact in _Artifacts.items:
print(f"Uploading {artifact['name']}...")
_upload_release_artifact(gh, release, artifact)
webbrowser.open(release["html_url"])
if __name__ == "__main__":
main()
| python |
# Generated by Django 3.1.12 on 2021-08-06 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("aidants_connect_web", "0064_merge_20210804_1156"),
]
operations = [
migrations.AlterField(
model_name="habilitationrequest",
name="email",
field=models.EmailField(max_length=150),
),
]
| python |
from django.shortcuts import render
from sch.models import search1
from sch.models import subs
# Create your views here.
def list(request):
select1=request.POST.get('select1')
select2=request.POST.get('select2')
ls = search1.objects.filter(City=select2)
print(select2)
print(select1)
return render(request,'search/search.html',{"ls1":ls})
def footer1(request):
return render(request,'mid/index.html.carousel_32cb')
def subs1(request):
if request.method=="POST":
print("email submitted")
email=request.POST['email']
print(email)
return render(request,'blood/index.html')
| python |
from gooey import options
from gooey_video import ffmpeg
def add_parser(parent):
parser = parent.add_parser('trim_crop', prog="Trim, Crop & Scale Video", help='Where does this show??')
input_group = parser.add_argument_group('Input', gooey_options=options.ArgumentGroup(
show_border=True
))
# basic details
input_group.add_argument(
'input',
metavar='Input',
help='The video you want to add a watermark to',
default=r'C:\Users\Chris\Dropbox\pretty_gui\Gooey\demo-screen-recording.mp4',
widget='FileChooser',
gooey_options=options.FileChooser(
wildcard='video files (*.mp4)|*.mp4',
full_width=True
))
settings = parser.add_argument_group(
'Trim Settings',
gooey_options=options.ArgumentGroup(
show_border=True
))
start_position = settings.add_mutually_exclusive_group(gooey_options=options.MutexGroup(
initial_selection=0
))
start_position.add_argument(
'--start-ss',
metavar='Start position',
help='Start position in seconds',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=99999,
increment_size=1
))
start_position.add_argument(
'--start-ts',
metavar='Start position',
help='start-position as a concrete timestamp',
gooey_options=options.TextField(
placeholder='HH:MM:SS',
validator=options.RegexValidator(
test='^\d{2}:\d{2}:\d{2}$',
message='Must be in the format HH:MM:SS'
)
))
end = settings.add_mutually_exclusive_group(
gooey_options=options.MutexGroup(
initial_selection=0
))
end.add_argument(
'--end-ss',
metavar='End position',
help='Total duration from the start (seconds)',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=99999,
increment_size=1
))
end.add_argument(
'--end-ts',
metavar='End position',
help='End position as a concrete timestamp',
gooey_options=options.TextField(
placeholder='HH:MM:SS',
validator=options.RegexValidator(
test='^\d{2}:\d{2}:\d{2}$',
message='Must be in the format HH:MM:SS'
)
))
crop_settings = parser.add_argument_group('Crop Settings', gooey_options=options.ArgumentGroup(
show_border=True
))
crop_settings.add_argument(
'--enable-crop',
metavar='Crop Video',
help='Enable the cropping filters',
action='store_true',
gooey_options=options.LayoutOptions(
full_width=True,
show_label=False
)
)
crop_settings.add_argument(
'--crop-width',
metavar='Width',
help='Width of the cropped region',
default=640,
widget='IntegerField',
gooey_options=options.IntegerField(
min=1,
max=1920
))
crop_settings.add_argument(
'--crop-height',
metavar='Height',
help='Height of the cropped region',
default=480,
widget='IntegerField',
gooey_options=options.IntegerField(
min=1,
max=1080
))
crop_settings.add_argument(
'--crop-x',
metavar='Margin left',
help='X position where to position the crop region',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=1920
))
crop_settings.add_argument(
'--crop-y',
metavar='Margin top',
help='Y position where to position the crop region',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=1080
))
scale = parser.add_argument_group('Crop Settings', gooey_options=options.ArgumentGroup(
show_border=True
))
scale.add_argument(
'--scale-width',
metavar='Width',
help='Scale the video to this width (-1 preserves aspect ratio)',
default=-1,
widget='IntegerField',
gooey_options=options.IntegerField(
min=-1,
max=1920
))
scale.add_argument(
'--scale-height',
metavar='Height',
help='Scale the video to this height (-1 preserves aspect ratio)',
default=-1,
widget='IntegerField',
gooey_options=options.IntegerField(
min=-2,
max=1080
))
output_group = parser.add_argument_group('Output', gooey_options=options.ArgumentGroup(
show_border=True
))
output_group.add_argument(
'output',
help='Choose where to save the output video',
default=r'C:\Users\Chris\Desktop\output.mp4',
widget='FileSaver',
gooey_options=options.FileSaver(
wildcard='video files (*.mp4)|*.mp4',
default_file='output.mp4',
full_width=True
))
output_group.add_argument(
'--overwrite',
metavar='Overwrite existing',
help='Overwrite the output video if it already exists?',
action='store_const',
default=True,
const='-y',
widget='CheckBox')
return parser
def run(args):
template = 'ffmpeg.exe ' \
'-i "{input}" ' \
'-ss {trim_start} ' \
'-to {trim_end} ' \
'-filter:v "crop={crop_w}:{crop_h}:{crop_x}:{crop_y},scale={scale_w}:{scale_h}" ' \
'{overwrite} ' \
'"{output}"'
cmd = template.format(
input=args.input,
trim_start=args.start_ts or args.start_ss or 0,
trim_end=args.end_ts or args.end_ss or '99:59:59',
crop_w=args.crop_width if args.enable_crop else 'iw',
crop_h=args.crop_height if args.enable_crop else 'ih',
crop_x=args.crop_x if args.enable_crop else 0,
crop_y=args.crop_y if args.enable_crop else 0,
scale_w=args.scale_width,
scale_h=args.scale_height,
overwrite=args.overwrite,
output=args.output
)
ffmpeg.run(cmd) | python |
import pytest
from gpiozero import Device
from gpiozero.pins.mock import MockFactory, MockPWMPin
from pytenki import PyTenki
@pytest.yield_fixture
def mock_factory(request):
save_factory = Device.pin_factory
Device.pin_factory = MockFactory()
yield Device.pin_factory
if Device.pin_factory is not None:
Device.pin_factory.reset()
Device.pin_factory = save_factory
@pytest.fixture
def pwm(request, mock_factory):
mock_factory.pin_class = MockPWMPin
@pytest.fixture(scope='module')
def led_pins():
return {
'fine': 4,
'cloud': 17,
'rain': 27,
'snow': 22,
}
@pytest.fixture(scope='module')
def button_pin():
return 2
@pytest.fixture
def pytenki(mock_factory, pwm):
return PyTenki()
@pytest.fixture
def pytenki_init(mock_factory, pwm, led_pins, button_pin):
return PyTenki(led_pins=led_pins, button_pin=button_pin)
| python |
a=list(map(int,input().split()))
n=len(a)
l=[]
m=0
j=n-1
for i in range(n-2,0,-1):
if(a[i]>a[i-1] and a[i]>a[0]):
m=max(m,a[i]-a[0])
#print(m)
elif(a[i]<a[i-1]):
j=i
m=0
l.append(m)
print(m)
m=0
while(j<n-1):
m=max(m,a[n-1]-a[j])
j+=1
l.append(m)
print(m)
print(sum(l))
| python |
""" Pacakge for various utilities """
| python |
# type:ignore
from django.conf.urls import include, url
from . import views
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', views.index, name='index'),
path('newproject', views.create_project, name = "create_project"),
path('profile/<username>', views.profile, name='profile'),
path("post/<int:id>", views.view_project, name="post_item"),
path("project/<int:id>", views.view_project, name="view_project"),
url(r"^api/project/$", views.ProjectList.as_view()),
url(r"api/project/project-id/(?P<pk>[0-9]+)/$", views.ProjectDescription.as_view()),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | python |
"""
Arrangement of panes.
Don't confuse with the prompt_toolkit VSplit/HSplit classes. This is a higher
level abstraction of the Pymux window layout.
An arrangement consists of a list of windows. And a window has a list of panes,
arranged by ordering them in HSplit/VSplit instances.
"""
from __future__ import unicode_literals
from ptterm import Terminal
from prompt_toolkit.application.current import get_app, set_app
from prompt_toolkit.buffer import Buffer
import math
import os
import weakref
import six
__all__ = (
'LayoutTypes',
'Pane',
'HSplit',
'VSplit',
'Window',
'Arrangement',
)
class LayoutTypes:
# The values are in lowercase with dashes, because that is what users can
# use at the command line.
EVEN_HORIZONTAL = 'even-horizontal'
EVEN_VERTICAL = 'even-vertical'
MAIN_HORIZONTAL = 'main-horizontal'
MAIN_VERTICAL = 'main-vertical'
TILED = 'tiled'
_ALL = [EVEN_HORIZONTAL, EVEN_VERTICAL, MAIN_HORIZONTAL, MAIN_VERTICAL, TILED]
class Pane(object):
"""
One pane, containing one process and a search buffer for going into copy
mode or displaying the help.
"""
_pane_counter = 1000 # Start at 1000, to be sure to not confuse this with pane indexes.
def __init__(self, terminal=None):
assert isinstance(terminal, Terminal)
self.terminal = terminal
self.chosen_name = None
# Displayed the clock instead of this pane content.
self.clock_mode = False
# Give unique ID.
Pane._pane_counter += 1
self.pane_id = Pane._pane_counter
# Prompt_toolkit buffer, for displaying scrollable text.
# (In copy mode, or help mode.)
# Note: Because the scroll_buffer can only contain text, we also use the
# get_tokens_for_line, that returns the token list with color
# information for each line.
self.scroll_buffer = Buffer(read_only=True)
self.copy_get_tokens_for_line = lambda lineno: []
self.display_scroll_buffer = False
self.scroll_buffer_title = ''
@property
def process(self):
return self.terminal.process
@property
def name(self):
"""
The name for the window as displayed in the title bar and status bar.
"""
# Name, explicitly set for the pane.
if self.chosen_name:
return self.chosen_name
else:
# Name from the process running inside the pane.
name = self.process.get_name()
if name:
return os.path.basename(name)
return ''
def enter_copy_mode(self):
"""
Suspend the process, and copy the screen content to the `scroll_buffer`.
That way the user can search through the history and copy/paste.
"""
self.terminal.enter_copy_mode()
def focus(self):
"""
Focus this pane.
"""
get_app().layout.focus(self.terminal)
class _WeightsDictionary(weakref.WeakKeyDictionary):
"""
Dictionary for the weights: weak keys, but defaults to 1.
(Weights are used to represent the proportion of pane sizes in
HSplit/VSplit lists.)
This dictionary maps the child (another HSplit/VSplit or Pane), to the
size. (Integer.)
"""
def __getitem__(self, key):
try:
# (Don't use 'super' here. This is a classobj in Python2.)
return weakref.WeakKeyDictionary.__getitem__(self, key)
except KeyError:
return 1
class _Split(list):
"""
Base class for horizontal and vertical splits. (This is a higher level
split than prompt_toolkit.layout.HSplit.)
"""
def __init__(self, *a, **kw):
list.__init__(self, *a, **kw)
# Mapping children to its weight.
self.weights = _WeightsDictionary()
def __hash__(self):
# Required in order to add HSplit/VSplit to the weights dict. "
return id(self)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, list.__repr__(self))
class HSplit(_Split):
""" Horizontal split. """
class VSplit(_Split):
""" Horizontal split. """
class Window(object):
"""
Pymux window.
"""
_window_counter = 1000 # Start here, to avoid confusion with window index.
def __init__(self, index=0):
self.index = index
self.root = HSplit()
self._active_pane = None
self._prev_active_pane = None
self.chosen_name = None
self.previous_selected_layout = None
#: When true, the current pane is zoomed in.
self.zoom = False
#: When True, send input to all panes simultaneously.
self.synchronize_panes = False
# Give unique ID.
Window._window_counter += 1
self.window_id = Window._window_counter
def invalidation_hash(self):
"""
Return a hash (string) that can be used to determine when the layout
has to be rebuild.
"""
# if not self.root:
# return '<empty-window>'
def _hash_for_split(split):
result = []
for item in split:
if isinstance(item, (VSplit, HSplit)):
result.append(_hash_for_split(item))
elif isinstance(item, Pane):
result.append('p%s' % item.pane_id)
if isinstance(split, HSplit):
return 'HSplit(%s)' % (','.join(result))
else:
return 'VSplit(%s)' % (','.join(result))
return '<window_id=%s,zoom=%s,children=%s>' % (
self.window_id, self.zoom, _hash_for_split(self.root))
@property
def active_pane(self):
"""
The current active :class:`.Pane`.
"""
return self._active_pane
@active_pane.setter
def active_pane(self, value):
assert isinstance(value, Pane)
# Remember previous active pane.
if self._active_pane:
self._prev_active_pane = weakref.ref(self._active_pane)
self.zoom = False
self._active_pane = value
@property
def previous_active_pane(self):
"""
The previous active :class:`.Pane` or `None` if unknown.
"""
p = self._prev_active_pane and self._prev_active_pane()
# Only return when this pane actually still exists in the current
# window.
if p and p in self.panes:
return p
@property
def name(self):
"""
The name for this window as it should be displayed in the status bar.
"""
# Name, explicitly set for the window.
if self.chosen_name:
return self.chosen_name
else:
pane = self.active_pane
if pane:
return pane.name
return ''
def add_pane(self, pane, vsplit=False):
"""
Add another pane to this Window.
"""
assert isinstance(pane, Pane)
assert isinstance(vsplit, bool)
split_cls = VSplit if vsplit else HSplit
if self.active_pane is None:
self.root.append(pane)
else:
parent = self._get_parent(self.active_pane)
same_direction = isinstance(parent, split_cls)
index = parent.index(self.active_pane)
if same_direction:
parent.insert(index + 1, pane)
else:
new_split = split_cls([self.active_pane, pane])
parent[index] = new_split
# Give the newly created split the same weight as the original
# pane that was at this position.
parent.weights[new_split] = parent.weights[self.active_pane]
self.active_pane = pane
self.zoom = False
def remove_pane(self, pane):
"""
Remove pane from this Window.
"""
assert isinstance(pane, Pane)
if pane in self.panes:
# When this pane was focused, switch to previous active or next in order.
if pane == self.active_pane:
if self.previous_active_pane:
self.active_pane = self.previous_active_pane
else:
self.focus_next()
# Remove from the parent. When the parent becomes empty, remove the
# parent itself recursively.
p = self._get_parent(pane)
p.remove(pane)
while len(p) == 0 and p != self.root:
p2 = self._get_parent(p)
p2.remove(p)
p = p2
# When the parent has only one item left, collapse into its parent.
while len(p) == 1 and p != self.root:
p2 = self._get_parent(p)
p2.weights[p[0]] = p2.weights[p] # Keep dimensions.
i = p2.index(p)
p2[i] = p[0]
p = p2
@property
def panes(self):
" List with all panes from this Window. "
result = []
for s in self.splits:
for item in s:
if isinstance(item, Pane):
result.append(item)
return result
@property
def splits(self):
" Return a list with all HSplit/VSplit instances. "
result = []
def collect(split):
result.append(split)
for item in split:
if isinstance(item, (HSplit, VSplit)):
collect(item)
collect(self.root)
return result
def _get_parent(self, item):
" The HSplit/VSplit that contains the active pane. "
for s in self.splits:
if item in s:
return s
@property
def has_panes(self):
" True when this window contains at least one pane. "
return len(self.panes) > 0
@property
def active_process(self):
" Return `Process` that should receive user input. "
p = self.active_pane
if p is not None:
return p.process
def focus_next(self, count=1):
" Focus the next pane. "
panes = self.panes
if panes:
self.active_pane = panes[(panes.index(self.active_pane) + count) % len(panes)]
else:
self.active_pane = None # No panes left.
def focus_previous(self):
" Focus the previous pane. "
self.focus_next(count=-1)
def rotate(self, count=1, with_pane_before_only=False, with_pane_after_only=False):
"""
Rotate panes.
When `with_pane_before_only` or `with_pane_after_only` is True, only rotate
with the pane before/after the active pane.
"""
# Create (split, index, pane, weight) tuples.
items = []
current_pane_index = None
for s in self.splits:
for index, item in enumerate(s):
if isinstance(item, Pane):
items.append((s, index, item, s.weights[item]))
if item == self.active_pane:
current_pane_index = len(items) - 1
# Only before after? Reduce list of panes.
if with_pane_before_only:
items = items[current_pane_index - 1:current_pane_index + 1]
elif with_pane_after_only:
items = items[current_pane_index:current_pane_index + 2]
# Rotate positions.
for i, triple in enumerate(items):
split, index, pane, weight = triple
new_item = items[(i + count) % len(items)][2]
split[index] = new_item
split.weights[new_item] = weight
def select_layout(self, layout_type):
"""
Select one of the predefined layouts.
"""
assert layout_type in LayoutTypes._ALL
# When there is only one pane, always choose EVEN_HORIZONTAL,
# Otherwise, we create VSplit/HSplit instances with an empty list of
# children.
if len(self.panes) == 1:
layout_type = LayoutTypes.EVEN_HORIZONTAL
# even-horizontal.
if layout_type == LayoutTypes.EVEN_HORIZONTAL:
self.root = HSplit(self.panes)
# even-vertical.
elif layout_type == LayoutTypes.EVEN_VERTICAL:
self.root = VSplit(self.panes)
# main-horizontal.
elif layout_type == LayoutTypes.MAIN_HORIZONTAL:
self.root = HSplit([
self.active_pane,
VSplit([p for p in self.panes if p != self.active_pane])
])
# main-vertical.
elif layout_type == LayoutTypes.MAIN_VERTICAL:
self.root = VSplit([
self.active_pane,
HSplit([p for p in self.panes if p != self.active_pane])
])
# tiled.
elif layout_type == LayoutTypes.TILED:
panes = self.panes
column_count = math.ceil(len(panes) ** .5)
rows = HSplit()
current_row = VSplit()
for p in panes:
current_row.append(p)
if len(current_row) >= column_count:
rows.append(current_row)
current_row = VSplit()
if current_row:
rows.append(current_row)
self.root = rows
self.previous_selected_layout = layout_type
def select_next_layout(self, count=1):
"""
Select next layout. (Cycle through predefined layouts.)
"""
# List of all layouts. (When we have just two panes, only toggle
# between horizontal/vertical.)
if len(self.panes) == 2:
all_layouts = [LayoutTypes.EVEN_HORIZONTAL, LayoutTypes.EVEN_VERTICAL]
else:
all_layouts = LayoutTypes._ALL
# Get index of current layout.
layout = self.previous_selected_layout or LayoutTypes._ALL[-1]
try:
index = all_layouts.index(layout)
except ValueError:
index = 0
# Switch to new layout.
new_layout = all_layouts[(index + count) % len(all_layouts)]
self.select_layout(new_layout)
def select_previous_layout(self):
self.select_next_layout(count=-1)
def change_size_for_active_pane(self, up=0, right=0, down=0, left=0):
"""
Increase the size of the current pane in any of the four directions.
"""
child = self.active_pane
self.change_size_for_pane(child, up=up, right=right, down=down, left=left)
def change_size_for_pane(self, pane, up=0, right=0, down=0, left=0):
"""
Increase the size of the current pane in any of the four directions.
Positive values indicate an increase, negative values a decrease.
"""
assert isinstance(pane, Pane)
def find_split_and_child(split_cls, is_before):
" Find the split for which we will have to update the weights. "
child = pane
split = self._get_parent(child)
def found():
return isinstance(split, split_cls) and (
not is_before or split.index(child) > 0) and (
is_before or split.index(child) < len(split) - 1)
while split and not found():
child = split
split = self._get_parent(child)
return split, child # split can be None!
def handle_side(split_cls, is_before, amount, trying_other_side=False):
" Increase weights on one side. (top/left/right/bottom). "
if amount:
split, child = find_split_and_child(split_cls, is_before)
if split:
# Find neighbour.
neighbour_index = split.index(child) + (-1 if is_before else 1)
neighbour_child = split[neighbour_index]
# Increase/decrease weights.
split.weights[child] += amount
split.weights[neighbour_child] -= amount
# Ensure that all weights are at least one.
for k, value in split.weights.items():
if value < 1:
split.weights[k] = 1
else:
# When no split has been found where we can move in this
# direction, try to move the other side instead using a
# negative amount. This happens when we run "resize-pane -R 4"
# inside the pane that is completely on the right. In that
# case it's logical to move the left border to the right
# instead.
if not trying_other_side:
handle_side(split_cls, not is_before, -amount,
trying_other_side=True)
handle_side(VSplit, True, left)
handle_side(VSplit, False, right)
handle_side(HSplit, True, up)
handle_side(HSplit, False, down)
def get_pane_index(self, pane):
" Return the index of the given pane. ValueError if not found. "
assert isinstance(pane, Pane)
return self.panes.index(pane)
class Arrangement(object):
"""
Arrangement class for one Pymux session.
This contains the list of windows and the layout of the panes for each
window. All the clients share the same Arrangement instance, but they can
have different windows active.
"""
def __init__(self):
self.windows = []
self.base_index = 0
self._active_window_for_cli = weakref.WeakKeyDictionary()
self._prev_active_window_for_cli = weakref.WeakKeyDictionary()
# The active window of the last CLI. Used as default when a new session
# is attached.
self._last_active_window = None
def invalidation_hash(self):
"""
When this changes, the layout needs to be rebuild.
"""
if not self.windows:
return '<no-windows>'
w = self.get_active_window()
return w.invalidation_hash()
def get_active_window(self):
"""
The current active :class:`.Window`.
"""
app = get_app()
try:
return self._active_window_for_cli[app]
except KeyError:
self._active_window_for_cli[app] = self._last_active_window or self.windows[0]
return self.windows[0]
def set_active_window(self, window):
assert isinstance(window, Window)
app = get_app()
previous = self.get_active_window()
self._prev_active_window_for_cli[app] = previous
self._active_window_for_cli[app] = window
self._last_active_window = window
def set_active_window_from_pane_id(self, pane_id):
"""
Make the window with this pane ID the active Window.
"""
assert isinstance(pane_id, int)
for w in self.windows:
for p in w.panes:
if p.pane_id == pane_id:
self.set_active_window(w)
def get_previous_active_window(self):
" The previous active Window or None if unknown. "
app = get_app()
try:
return self._prev_active_window_for_cli[app]
except KeyError:
return None
def get_window_by_index(self, index):
" Return the Window with this index or None if not found. "
for w in self.windows:
if w.index == index:
return w
def create_window(self, pane, name=None, set_active=True):
"""
Create a new window that contains just this pane.
:param pane: The :class:`.Pane` instance to put in the new window.
:param name: If given, name for the new window.
:param set_active: When True, focus the new window.
"""
assert isinstance(pane, Pane)
assert name is None or isinstance(name, six.text_type)
# Take the first available index.
taken_indexes = [w.index for w in self.windows]
index = self.base_index
while index in taken_indexes:
index += 1
# Create new window and add it.
w = Window(index)
w.add_pane(pane)
self.windows.append(w)
# Sort windows by index.
self.windows = sorted(self.windows, key=lambda w: w.index)
app = get_app(return_none=True)
if app is not None and set_active:
self.set_active_window(w)
if name is not None:
w.chosen_name = name
assert w.active_pane == pane
assert w._get_parent(pane)
def move_window(self, window, new_index):
"""
Move window to a new index.
"""
assert isinstance(window, Window)
assert isinstance(new_index, int)
window.index = new_index
# Sort windows by index.
self.windows = sorted(self.windows, key=lambda w: w.index)
def get_active_pane(self):
"""
The current :class:`.Pane` from the current window.
"""
w = self.get_active_window()
if w is not None:
return w.active_pane
def remove_pane(self, pane):
"""
Remove a :class:`.Pane`. (Look in all windows.)
"""
assert isinstance(pane, Pane)
for w in self.windows:
w.remove_pane(pane)
# No panes left in this window?
if not w.has_panes:
# Focus next.
for app, active_w in self._active_window_for_cli.items():
if w == active_w:
with set_app(app):
self.focus_next_window()
self.windows.remove(w)
def focus_previous_window(self):
w = self.get_active_window()
self.set_active_window(self.windows[
(self.windows.index(w) - 1) % len(self.windows)])
def focus_next_window(self):
w = self.get_active_window()
self.set_active_window(self.windows[
(self.windows.index(w) + 1) % len(self.windows)])
def break_pane(self, set_active=True):
"""
When the current window has multiple panes, remove the pane from this
window and put it in a new window.
:param set_active: When True, focus the new window.
"""
w = self.get_active_window()
if len(w.panes) > 1:
pane = w.active_pane
self.get_active_window().remove_pane(pane)
self.create_window(pane, set_active=set_active)
def rotate_window(self, count=1):
" Rotate the panes in the active window. "
w = self.get_active_window()
w.rotate(count=count)
@property
def has_panes(self):
" True when any of the windows has a :class:`.Pane`. "
for w in self.windows:
if w.has_panes:
return True
return False
| python |
from microsetta_public_api.utils._utils import (
jsonify,
DataTable,
create_data_entry,
)
__all__ = [
'testing',
'jsonify',
'DataTable',
'create_data_entry',
]
| python |
from __future__ import annotations
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import numpy as np
import pandas as pd
import datetime
import tensorflow as tf
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error, mean_absolute_error
from .define_model import (
cnnLSTMModel,
convLSTMModel,
mlpModel,
convModel,
)
from src.features.build_features import DataBlock, to_supervised, to_supervised_shuffled
from pickle import dump, load
np.random.seed(42)
def scale_and_encode(dataframe, subject: int):
"""
Function to scale numerical features and one hot encode categorical ones
Args:
dataframe: pd.DataFrame -> a pandas dataframe containing the data
Returns:
self.scaled_array:np.array -> a numpy array of scaled and encoded features
"""
# the numeric features which are not dependent on the subject description
numeric_features = ["bvp", "acc_x", "acc_y", "acc_z", "bmi", "age"]
# cat_features = ["sport"]
# create a pipeline to do the transformation
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
# categorical_transformer = Pipeline(steps=[("encoder", OneHotEncoder())])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
# ("cat", categorical_transformer, cat_features),
],
remainder="passthrough",
)
# fit the columntransformer to the dataframe
preprocessor.fit(dataframe)
# save the preprocessor as we will fit this scaler to validation and testing sets
dump(preprocessor, open("models/scaler_and_encoder_{}.pkl".format(subject), "wb"))
# # return the transformed array
return preprocessor.transform(dataframe)
class TrainModel:
"""
Class to handle training using a convLSTM model
"""
def __init__(
self,
train_subjects: list,
valid_subjects: list,
n_timesteps: int,
n_features: int,
n_conv_layers: int,
n_conv_filters: int,
kernel_size: int,
n_lstm_units: int,
n_dense_nodes: int,
n_output_nodes: int,
n_seq: int,
batch_size: int,
epochs: int,
scaler_encoder=None,
):
# define the model
self.model = cnnLSTMModel(
n_conv_layers=n_conv_layers,
n_conv_filters=n_conv_filters,
kernel_size=kernel_size,
n_lstm_units=n_lstm_units,
n_dense_nodes=n_dense_nodes,
n_output_nodes=n_output_nodes,
input_shape=(None, n_timesteps // n_seq, n_features),
)
# compile the model
self.model.compile(loss="mse", metrics="mae", optimizer="adam")
# define the train, test and valid subjects
self.train_subjects = train_subjects
self.test_subjects = []
self.valid_subjects = valid_subjects
# define the number of timesteps used in prediction
self.timesteps = n_timesteps
# define number of features used in the model
self.features = n_features
# # define the length of each subsequence
self.seq = n_seq
# define the batch size
self.batch_size = batch_size
# define epochs
self.epochs = epochs
# valid scores
self.valid_score = 0
# load scaler
self.scaler_encoder = scaler_encoder
def load_data(self, subject: int):
"""
Function to load data for training
Args:
subject: int -> the subject for which data is being loaded
Returns:
X,y : np.array -> training data and labels
"""
# load the dataframe
data = DataBlock("S{}".format(subject), "data/raw/")
df = data.raw_dataframe
# # name the columns
# df.columns = [
# "bvp",
# "acc_x",
# "acc_y",
# "acc_z",
# "gender",
# "age",
# "sport",
# "bmi",
# "heart_rate",
# ]
# if scaling and encoding needs to be done, load the scaler encoder and transform the dataframe
if self.scaler_encoder:
df = self.scaler_encoder.transform(df)
X, y = to_supervised(np.array(df), self.timesteps, 1)
# reshape the X array to meet the requirements of the model
X = self.reshape(X)
return X, y
def train(self):
"""
Function to run training
"""
for sub in self.train_subjects:
# load training and validation data
print("-------------------------------------")
print("training on subject - {}".format(sub))
print("-------------------------------------")
train_X, train_y = self.load_data(subject=sub)
# define callbacks
# early stopping
es_callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=5)
log_dir = "models/logs/fit/" + datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S"
)
# tensorboard callback
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
# fit the model and save history
self.model.fit(
train_X,
train_y,
epochs=self.epochs,
batch_size=self.batch_size,
callbacks=[es_callback, tb_callback],
verbose=0,
)
print("-------------------------------------")
print("testing on subject - {}".format(self.valid_subjects[0]))
print("-------------------------------------")
# check performance on hold out validation set
valid_X, valid_y = self.load_data(subject=self.valid_subjects[0])
yhat = self.model.predict(valid_X)  # use the instance's model rather than the module-level 'process' global
# calculate mae of model predictions on validation data
mae = mean_absolute_error(valid_y, yhat)
self.valid_score = mae
# save the model
self.model.save("models/ckpoints/model_{}".format(self.valid_subjects[0]))
# def train_shuffled(
# self,
# train_X: np.array,
# train_y: np.array,
# valid_X: np.array,
# valid_y: np.array,
# valid_subject: int,
# ):
# """
# Function to run training
# """
# # define callbacks
# # early stopping
# es_callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=5)
# log_dir = "models/logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# # tensorboard callback
# tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
# # fit the model and save history
# self.model.fit(
# train_X,
# train_y,
# epochs=self.epochs,
# batch_size=self.batch_size,
# callbacks=[es_callback, tb_callback],
# )
# yhat = process.model.predict(valid_X)
# mae = mean_absolute_error(valid_y, yhat)
# self.valid_score = mae
# self.model.save("models/ckpoints/model_{}".format(valid_subject))
def reshape(self, X: np.array):
"Function which reshapes the input data into the required shape for CNN LSTM model"
return X.reshape(
(X.shape[0], self.seq, self.timesteps // self.seq, self.features)
)
if __name__ == "__main__":
total_subjects = list(range(1, 16))
val_scores = []
# iterate through each subject and treat it as validation set
for i in total_subjects:
print("******************************************")
print("training fold - {}".format(i))
print("******************************************")
# defining training and validation subjects
train_subjects = [x for x in total_subjects if x != i]
valid_subjects = [i]
# initiate a list of dataframes
list_of_dfs = []
# append all the dataframes in the training set
for subject in train_subjects:
data = DataBlock("S{}".format(subject), "data/raw/")
df = data.raw_dataframe
list_of_dfs.append(df)
# create a concatenated dataframe
frames = pd.concat(list_of_dfs)
# scale and encode training set
sf_frames = scale_and_encode(frames, i)
# use the saved scaler encoder for later use with validation set
saved_scaler_encoder = load(
open("models/scaler_and_encoder_{}.pkl".format(i), "rb")
)
# define number of features
n_features = 8
# instantiate the training model process -> for each training fold, the model is freshly initiated
process = TrainModel(
train_subjects=train_subjects,
valid_subjects=valid_subjects,
n_timesteps=8,
n_features=n_features,
n_conv_layers=2,
n_conv_filters=20,
kernel_size=4,
n_lstm_units=64,
n_dense_nodes=32,
n_output_nodes=1,
n_seq=1,
batch_size=100,
epochs=100,
scaler_encoder=saved_scaler_encoder,
)
# run training
process.train()
# print and save validation scores
print(
"validation score on subject -{} ".format(valid_subjects[0]),
process.valid_score,
)
val_scores.append(process.valid_score)
print(val_scores)
| python |
#It is necessary to import the datetime module when handling date and time
import datetime
currentTime = datetime.datetime.now()
currentDate = datetime.date.today()
#This will print the date
#print(currentDate)
#This is the year
#print(currentDate.year)
#This is the month
#print(currentDate.month)
#And this is the day...
#print(currentDate.day)
#The "strftime()" function is a more common way for getting specific elements of date
#day = currentDate.strftime('%d')
#month = currentDate.strftime('%B')
#year = currentDate.strftime('%Y')
#This will print today's date
#print("Today's date is the " + day + "th of " + month + ", " + year + ".")
print("Okay, what if I told you I could guess how many days till your birthday...")
userBirthday = input("When's your birthday? Write it here: ")
try:
bday = datetime.datetime.strptime(userBirthday, '%d/%m/%Y').date()
except ValueError:
print("Oh sorry, my bad... You are meant to put it in this format; dd/mm/yyyy.")
userBirthday = input("When's your next birthday? Write it here: ")
try:
bday = datetime.datetime.strptime(userBirthday, '%d/%m/%Y').date()
except ValueError:
print("Invalid input... Input not processed...")
try:
daysTillBday = bday - currentDate
print("I think I got that... Ok, so there are " + str(daysTillBday.days) + " days till you birthday right?")
except:
print("Uh oh... \nI couldn't really catch your birthday, no worries, there's always next time...")
print("Goodbye.")
| python |
RAD_FILE_FOLDER = ""
path_stack = [] #wrt RAD_FILE_FOLDER
JSON_FILE_FOLDER = "" | python |
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from batchgenerators.augmentations.utils import resize_segmentation
from uuunet.experiment_planning.plan_and_preprocess_task import get_caseIDs_from_splitted_dataset_folder
from uuunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Process, Queue
import torch
import threading
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import SimpleITK as sitk
import shutil
from multiprocessing import Pool
from uuunet.training.model_restore import load_model_and_checkpoint_files
from uuunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from uuunet.utilities.one_hot_encoding import to_one_hot
def plot_images(img, img2=None):
"""
Plot at most 2 images.
Support passing in ndarray or image path string.
"""
fig = plt.figure(figsize=(20,10))
    if isinstance(img, str): img = plt.imread(img)
    if isinstance(img2, str): img2 = plt.imread(img2)
if img2 is None:
ax = fig.add_subplot(111)
ax.imshow(img)
else:
height, width = img.shape[0], img.shape[1]
if height < width:
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else:
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.imshow(img)
ax2.imshow(img2)
plt.show()
def view_batch(imgs, lbls, labels=['image', 'label'], stack=False):
'''
imgs: [D, H, W, C], the depth or batch dimension should be the first.
'''
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.set_title(labels[0])
ax2.set_title(labels[1])
"""
if init with zeros, the animation may not update? seems bug in animation.
"""
if stack:
lbls = np.stack((lbls, imgs, imgs), -1)
img1 = ax1.imshow(np.random.rand(*imgs.shape[1:]))
img2 = ax2.imshow(np.random.rand(*lbls.shape[1:]))
def update(i):
plt.suptitle(str(i))
img1.set_data(imgs[i])
img2.set_data(lbls[i])
return img1, img2
ani = animation.FuncAnimation(fig, update, frames=len(imgs), interval=10, blit=False, repeat_delay=0)
plt.show()
def predict_save_to_queue(preprocess_fn, q, list_of_lists, output_files, segs_from_prev_stage, classes):
errors_in = []
for i, l in enumerate(list_of_lists):
try:
output_file = output_files[i]
print("preprocessing", output_file)
d, _, dct = preprocess_fn(l)
print(output_file, dct)
if segs_from_prev_stage[i] is not None:
assert isfile(segs_from_prev_stage[i]) and segs_from_prev_stage[i].endswith(".nii.gz"), "segs_from_prev_stage" \
" must point to a " \
"segmentation file"
seg_prev = sitk.GetArrayFromImage(sitk.ReadImage(segs_from_prev_stage[i]))
# check to see if shapes match
img = sitk.GetArrayFromImage(sitk.ReadImage(l[0]))
assert all([i == j for i, j in zip(seg_prev.shape, img.shape)]), "image and segmentation from previous " \
"stage don't have the same pixel array " \
"shape! image: %s, seg_prev: %s" % \
(l[0], segs_from_prev_stage[i])
seg_reshaped = resize_segmentation(seg_prev, d.shape[1:], order=1, cval=0)
seg_reshaped = to_one_hot(seg_reshaped, classes)
d = np.vstack((d, seg_reshaped)).astype(np.float32)
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
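            # Note (added comment): 2e9 bytes / 4 bytes per float32 gives the rough element budget
            # for a pickle below the ~2 GB limit; the 0.9 factor leaves a safety margin.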
print(d.shape)
            if np.prod(d.shape) > (2e9 / 4 * 0.9):  # *0.9 just to be safe, 4 because float32 is 4 bytes
print(
"This output is too large for python process-process communication. "
"Saving output temporarily to disk")
np.save(output_file[:-7] + ".npy", d)
d = output_file[:-7] + ".npy"
q.put((output_file, (d, dct)))
except KeyboardInterrupt:
raise KeyboardInterrupt
except Exception as e:
print("error in", l)
print(e)
q.put("end")
if len(errors_in) > 0:
print("There were some errors in the following cases:", errors_in)
print("These cases were ignored.")
else:
print("This worker has ended successfully, no errors to report")
def preprocess_multithreaded(trainer, list_of_lists, output_files, num_processes=2, segs_from_prev_stage=None):
if segs_from_prev_stage is None:
segs_from_prev_stage = [None] * len(list_of_lists)
classes = list(range(1, trainer.num_classes))
assert isinstance(trainer, nnUNetTrainer)
q = Queue(1)
processes = []
for i in range(num_processes):
pr = Process(target=predict_save_to_queue, args=(trainer.preprocess_patient, q,
list_of_lists[i::num_processes],
output_files[i::num_processes],
segs_from_prev_stage[i::num_processes],
classes))
pr.start()
processes.append(pr)
try:
end_ctr = 0
while end_ctr != num_processes:
item = q.get()
if item == "end":
end_ctr += 1
continue
else:
yield item
finally:
for p in processes:
if p.is_alive():
p.terminate() # this should not happen but better safe than sorry right
p.join()
q.close()
def predict_cases(model, list_of_lists, output_filenames, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True,
overwrite_existing=False, data_type='2d', modality=0):
assert len(list_of_lists) == len(output_filenames)
if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames)
prman = Pool(num_threads_nifti_save)
results = []
cleaned_output_files = []
for o in output_filenames:
dr, f = os.path.split(o)
if len(dr) > 0:
maybe_mkdir_p(dr)
if not f.endswith(".nii.gz"):
f, _ = os.path.splitext(f)
f = f + ".nii.gz"
cleaned_output_files.append(join(dr, f))
if not overwrite_existing:
print("number of cases:", len(list_of_lists))
not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
list_of_lists = [list_of_lists[i] for i in not_done_idx]
if segs_from_prev_stage is not None:
segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
print("number of cases that still need to be predicted:", len(cleaned_output_files))
print("emptying cuda cache")
torch.cuda.empty_cache()
##################################
# Damn, finally find the model.
print("loading parameters for folds,", folds)
trainer, params = load_model_and_checkpoint_files(model, folds)
trainer.modality = modality
print("starting preprocessing generator")
preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage)
print("starting prediction...")
for preprocessed in preprocessing:
output_filename, (d, dct) = preprocessed
if isinstance(d, str):
data = np.load(d)
os.remove(d)
d = data
print("predicting", output_filename)
softmax = []
for p in params:
trainer.load_checkpoint_ram(p, False)
softmax.append(trainer.predict_preprocessed_data_return_softmax(d, do_tta, 1, False, 1,
trainer.data_aug_params['mirror_axes'],
True, True, 2, trainer.patch_size, True, data_type=data_type)[None])
softmax = np.vstack(softmax)
softmax_mean = np.mean(softmax, 0)
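        # (added note) `softmax` stacks one prediction per loaded checkpoint/fold, shape
        # (n_checkpoints, C, ...); averaging over axis 0 ensembles the folds.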
### View output
"""
output_ = softmax_mean.argmax(0)
target_ = d
if threading.current_thread() is threading.main_thread():
print("!!!output", output_.shape, target_.shape) # haw
matplotlib.use('TkAgg')
if len(target_.shape) == 4:
view_batch(output_, target_[0])
else:
plot_images(output_, target_[0])
"""
transpose_forward = trainer.plans.get('transpose_forward')
if transpose_forward is not None:
transpose_backward = trainer.plans.get('transpose_backward')
softmax_mean = softmax_mean.transpose([0] + [i + 1 for i in transpose_backward])
if save_npz:
npz_file = output_filename[:-7] + ".npz"
else:
npz_file = None
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
        if np.prod(softmax_mean.shape) > (2e9 / 4 * 0.9):  # *0.9 just to be safe
            print("This output is too large for python process-process communication. Saving output temporarily to disk")
np.save(output_filename[:-7] + ".npy", softmax_mean)
softmax_mean = output_filename[:-7] + ".npy"
results.append(prman.starmap_async(save_segmentation_nifti_from_softmax,
((softmax_mean, output_filename, dct, 1, None, None, None, npz_file), )
))
_ = [i.get() for i in results]
def predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta,
overwrite_existing=True, data_type='2d', modality=0):
"""
here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases
    :param model: [HAW] why call it model? It is just a path (the model output folder)!
:param input_folder:
:param output_folder:
:param folds:
:param save_npz:
:param num_threads_preprocessing:
:param num_threads_nifti_save:
:param lowres_segmentations:
:param part_id:
:param num_parts:
:param tta:
:return:
"""
maybe_mkdir_p(output_folder)
#shutil.copy(join(model, 'plans.pkl'), output_folder)
case_ids = get_caseIDs_from_splitted_dataset_folder(input_folder)
output_files = [join(output_folder, i + ".nii.gz") for i in case_ids]
all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and
len(i) == (len(j) + 12)] for j in case_ids]
if lowres_segmentations is not None:
assert isdir(lowres_segmentations), "if lowres_segmentations is not None then it must point to a directory"
lowres_segmentations = [join(lowres_segmentations, i + ".nii.gz") for i in case_ids]
assert all([isfile(i) for i in lowres_segmentations]), "not all lowres_segmentations files are present. " \
"(I was searching for case_id.nii.gz in that folder)"
lowres_segmentations = lowres_segmentations[part_id::num_parts]
else:
lowres_segmentations = None
return predict_cases(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, save_npz,
num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations,
tta, overwrite_existing=overwrite_existing,
data_type=data_type, modality=modality)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct"
" order (same as training). Files must be named "
"CASENAME_XXXX.nii.gz where XXXX is the modality "
"identifier (0000, 0001, etc)", required=True)
parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
parser.add_argument('-m', '--model_output_folder', help='model output folder. Will automatically discover the folds '
'that were '
'run and use those as an ensemble', required=True)
parser.add_argument('-f', '--folds', nargs='+', default='None', help="folds to use for prediction. Default is None "
"which means that folds will be detected "
"automatically in the model output folder")
parser.add_argument('-z', '--save_npz', required=False, action='store_true', help="use this if you want to ensemble"
" these predictions with those of"
" other models. Softmax "
"probabilities will be saved as "
"compresed numpy arrays in "
"output_folder and can be merged "
"between output_folders with "
"merge_predictions.py")
parser.add_argument('-l', '--lowres_segmentations', required=False, default='None', help="if model is the highres "
"stage of the cascade then you need to use -l to specify where the segmentations of the "
"corresponding lowres unet are. Here they are required to do a prediction")
parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (for example via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_parts", type=int, required=False, default=1, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help=
"Determines many background processes will be used for data preprocessing. Reduce this if you "
"run into out of memory (RAM) problems. Default: 6")
parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help=
"Determines many background processes will be used for segmentation export. Reduce this if you "
"run into out of memory (RAM) problems. Default: 2")
parser.add_argument("--tta", required=False, type=int, default=1, help="Set to 0 to disable test time data "
"augmentation (speedup of factor "
"4(2D)/8(3D)), "
"lower quality segmentations")
parser.add_argument("--overwrite_existing", required=False, type=int, default=1, help="Set this to 0 if you need "
"to resume a previous "
"prediction. Default: 1 "
"(=existing segmentations "
"in output_folder will be "
"overwritten)")
args = parser.parse_args()
input_folder = args.input_folder
output_folder = args.output_folder
part_id = args.part_id
num_parts = args.num_parts
model = args.model_output_folder
folds = args.folds
save_npz = args.save_npz
lowres_segmentations = args.lowres_segmentations
num_threads_preprocessing = args.num_threads_preprocessing
num_threads_nifti_save = args.num_threads_nifti_save
tta = args.tta
overwrite = args.overwrite_existing
if lowres_segmentations == "None":
lowres_segmentations = None
if isinstance(folds, list):
if folds[0] == 'all' and len(folds) == 1:
pass
else:
folds = [int(i) for i in folds]
elif folds == "None":
folds = None
else:
raise ValueError("Unexpected value for argument folds")
if tta == 0:
tta = False
elif tta == 1:
tta = True
else:
raise ValueError("Unexpected value for tta, Use 1 or 0")
if overwrite == 0:
overwrite = False
elif overwrite == 1:
overwrite = True
else:
raise ValueError("Unexpected value for overwrite, Use 1 or 0")
predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta,
overwrite_existing=overwrite)
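    # Hypothetical invocation sketch (script name and paths are placeholders, not from the source;
    # the flags are the ones defined by the argument parser above):
    #   python predict.py -i /data/imagesTs -o /data/preds -m /models/TaskXX_nnUNet \
    #       -f 0 1 2 3 4 --num_threads_preprocessing 6 --tta 1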
| python |
import os
ps_user = "sample"
ps_password = "sample"
| python |
# encoding: UTF-8
'''
Gateway adapter for vn.lts
'''
import os
import json
from vnltsmd import MdApi
from vnltstd import TdApi
from vnltsqry import QryApi
from ltsDataType import *
from vtGateway import *
# The following dictionaries map VT constants to their LTS counterparts
# price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["SECURITY_FTDC_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["SECURITY_FTDC_OPT_AnyPrice"]
priceTypeMap[PRICETYPE_FAK] = defineDict["SECURITY_FTDC_OPT_BestPrice"]
priceTypeMap[PRICETYPE_FOK] = defineDict["SECURITY_FTDC_OPT_AllLimitPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# direction mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_D_Buy"]
directionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_D_Sell"]
directionMapReverse = {v: k for k, v in directionMap.items()}
# offset (open/close) mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict["SECURITY_FTDC_OF_Open"]
offsetMap[OFFSET_CLOSE] = defineDict["SECURITY_FTDC_OF_Close"]
offsetMap[OFFSET_CLOSETODAY] = defineDict["SECURITY_FTDC_OF_CloseToday"]
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict["SECURITY_FTDC_OF_CloseYesterday"]
offsetMapReverse = {v: k for k, v in offsetMap.items()}
# exchange mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SSE] = 'SSE'
exchangeMap[EXCHANGE_SZSE] = 'SZE'
exchangeMapReverse = {v: k for k, v in exchangeMap.items()}
# position direction mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_NET] = defineDict["SECURITY_FTDC_PD_Net"]
posiDirectionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_PD_Long"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_PD_Short"]
posiDirectionMapReverse = {v: k for k, v in posiDirectionMap.items()}
########################################################################################
class LtsGateway(VtGateway):
"""Lts接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='LTS'):
"""Constructor"""
super(LtsGateway, self).__init__(eventEngine, gatewayName)
self.mdApi = LtsMdApi(self)
self.tdApi = LtsTdApi(self)
self.qryApi = LtsQryApi(self)
self.mdConnected = False
self.tdConnected = False
self.qryConnected = False
        self.qryEnabled = False         # whether the periodic query loop should run
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json 文件
fileName = self.gatewayName + '_connect.json'
fileName = os.getcwd() + '\\ltsGateway\\' + fileName
try:
f = file(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
        # parse the JSON file
setting = json.load(f)
try:
userID = str(setting['userID'])
mdPassword = str(setting['mdPassword'])
tdPassword = str(setting['tdPassword'])
brokerID = str(setting['brokerID'])
tdAddress = str(setting['tdAddress'])
mdAddress = str(setting['mdAddress'])
qryAddress = str(setting['qryAddress'])
productInfo = str(setting['productInfo'])
authCode = str(setting['authCode'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
        # connect the market data, trading and query API objects
self.mdApi.connect(userID, mdPassword, brokerID, mdAddress)
self.tdApi.connect(userID, tdPassword, brokerID, tdAddress, productInfo, authCode)
self.qryApi.connect(userID, tdPassword, brokerID, qryAddress, productInfo, authCode)
        # initialise and start the periodic queries
self.initQuery()
self.startQuery()
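        # Example LTS_connect.json layout, expected in the ltsGateway folder (illustrative
        # placeholder values only; the field names are the ones parsed above):
        # {
        #     "userID": "000000", "mdPassword": "xxx", "tdPassword": "xxx",
        #     "brokerID": "0000", "tdAddress": "tcp://...", "mdAddress": "tcp://...",
        #     "qryAddress": "tcp://...", "productInfo": "vnpy", "authCode": "xxx"
        # }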
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
self.mdApi.subscribe(subscribeReq)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.tdApi.sendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.tdApi.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.qryApi.qryAccount()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.qryApi.qryPosition()
#----------------------------------------------------------------------
def close(self):
"""关闭"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
if self.qryConnected:
self.qryApi.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# 需要循环的查询函数列表
self.qryFunctionList = [self.qryAccount, self.qryPosition]
self.qryCount = 0 # 查询触发倒计时
self.qryTrigger = 2 # 查询触发点
self.qryNextFunction = 0 # 上次运行的查询函数索引
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
# 清空倒计时
self.qryCount = 0
# 执行查询函数
function = self.qryFunctionList[self.qryNextFunction]
function()
# 计算下次查询函数的索引,如果超过了列表长度,则重新设为0
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
########################################################################
class LtsMdApi(MdApi):
"""Lts行情API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(LtsMdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request sequence number
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.subscribedSymbols = set()
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self,n):
"""服务器断开"""
self.connectionStatus= False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
#----------------------------------------------------------------------
def onRspError(self,error,n,last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登录完成'
self.gateway.onLog(log)
# 重新订阅之前订阅的合约
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
# 通常不在乎订阅错误,选择忽略
pass
#----------------------------------------------------------------------
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
# 同上
pass
#----------------------------------------------------------------------
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = data['InstrumentID']
tick.exchange = exchangeMapReverse.get(data['ExchangeID'], u'未知')
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
tick.date = data['TradingDay']
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.preClosePrice = data['PreClosePrice']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
# LTS有5档行情
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
tick.bidPrice2 = data['BidPrice2']
tick.bidVolume2 = data['BidVolume2']
tick.askPrice2 = data['AskPrice2']
tick.askVolume2 = data['AskVolume2']
tick.bidPrice3 = data['BidPrice3']
tick.bidVolume3 = data['BidVolume3']
tick.askPrice3 = data['AskPrice3']
tick.askVolume3 = data['AskVolume3']
tick.bidPrice4 = data['BidPrice4']
tick.bidVolume4 = data['BidVolume4']
tick.askPrice4 = data['AskPrice4']
tick.askVolume4 = data['AskVolume4']
tick.bidPrice5 = data['BidPrice5']
tick.bidVolume5 = data['BidVolume5']
tick.askPrice5 = data['AskPrice5']
tick.askVolume5 = data['AskVolume5']
self.gateway.onTick(tick)
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcMdApi(path)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
req = {}
req['InstrumentID'] = str(subscribeReq.symbol)
req['ExchangeID'] = exchangeMap.get(str(subscribeReq.exchange), '')
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
if self.loginStatus:
self.subscribeMarketData(req)
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
def login(self):
"""登录"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class LtsTdApi(TdApi):
"""LTS交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsTdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request sequence number
        self.orderRef = EMPTY_INT               # order reference number
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address
        self.productInfo = EMPTY_STRING         # product name of this client
        self.authCode = EMPTY_STRING            # authentication code
        self.randCode = EMPTY_STRING            # random code
        self.frontID = EMPTY_INT                # front server ID
        self.sessionID = EMPTY_INT              # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接成功'
self.gateway.onLog(log)
# 前置机连接后,请求随机码
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登录完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
            err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
# 创建报单数据对象
order = VtOrderData()
order.gatewayName = self.gatewayName
# 保存代码和报单号
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
order.vtSymbol = '.'.join([order.symbol, order.exchange])
order.orderID = data['OrderRef']
# 方向
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
# 开平
if data['CombOffsetFlag'] == '0':
order.offset = OFFSET_OPEN
elif data['CombOffsetFlag'] == '1':
order.offset = OFFSET_CLOSE
else:
order.offset = OFFSET_UNKNOWN
# 状态
if data['OrderStatus'] == '0':
order.status = STATUS_ALLTRADED
elif data['OrderStatus'] == '1':
order.status = STATUS_PARTTRADED
elif data['OrderStatus'] == '3':
order.status = STATUS_NOTTRADED
elif data['OrderStatus'] == '5':
order.status = STATUS_CANCELLED
else:
order.status = STATUS_UNKNOWN
# 价格、报单量等数值
order.price = float(data['LimitPrice'])
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
# CTP的报单号一致性维护需要基于frontID, sessionID, orderID三个字段
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
# 推送
self.gateway.onOrder(order)
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报"""
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
# 保存代码和报单号
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.tradeID = data['TradeID']
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
# 方向
trade.direction = directionMapReverse.get(data['Direction'], '')
# 开平
trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
# 价格、报单量等数值
trade.price = float(data['Price'])
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
# 推送
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFundOutByLiber(self, data, error, n, last):
"""LTS发起出金应答"""
pass
#----------------------------------------------------------------------
def onRtnFundOutByLiber(self, data):
"""LTS发起出金通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundOutByLiber(self, data, error):
"""LTS发起出金错误回报"""
pass
#----------------------------------------------------------------------
def onRtnFundInByBank(self, data):
"""银行发起入金通知"""
pass
#----------------------------------------------------------------------
def onRspFundInterTransfer(self, data, error, n, last):
"""资金内转应答"""
pass
#----------------------------------------------------------------------
def onRtnFundInterTransferSerial(self, data):
"""资金内转流水通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundInterTransfer(self, data, error):
"""资金内转错误回报"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcTraderApi(path)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = str(orderReq.symbol)
req['LimitPrice'] = str(orderReq.price) # LTS里的价格是字符串
req['VolumeTotalOriginal'] = int(orderReq.volume)
req['ExchangeID'] = exchangeMap.get(orderReq.exchange, '')
# 下面如果由于传入的类型本接口不支持,则会返回空字符串
try:
req['OrderPriceType'] = priceTypeMap[orderReq.priceType]
req['Direction'] = directionMap[orderReq.direction]
req['CombOffsetFlag'] = offsetMap[orderReq.offset]
req['ExchangeID'] = exchangeMap[orderReq.exchange]
except KeyError:
return ''
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
req['CombHedgeFlag'] = defineDict['SECURITY_FTDC_HF_Speculation'] # 投机单
req['ContingentCondition'] = defineDict['SECURITY_FTDC_CC_Immediately'] # 立即发单
req['ForceCloseReason'] = defineDict['SECURITY_FTDC_FCC_NotForceClose'] # 非强平
req['IsAutoSuspend'] = 0 # 非自动挂起
req['TimeCondition'] = defineDict['SECURITY_FTDC_TC_GFD'] # 今日有效
req['VolumeCondition'] = defineDict['SECURITY_FTDC_VC_AV'] # 任意成交量
req['MinVolume'] = 1 # 最小成交量为1
req['UserForceClose'] = 0
self.reqOrderInsert(req, self.reqID)
# 返回订单号(字符串),便于某些算法进行动态管理
vtOrderID = '.'.join([self.gatewayName, str(self.orderRef)])
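        # (added note) e.g. with the default gatewayName 'LTS' and orderRef 3, vtOrderID is 'LTS.3'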
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['SECURITY_FTDC_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class LtsQryApi(QryApi):
"""Lts账户查询实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsQryApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request sequence number
        self.orderRef = EMPTY_INT               # order reference number
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address
        self.productInfo = EMPTY_STRING         # product name of this client
        self.authCode = EMPTY_STRING            # authentication code
        self.randCode = EMPTY_STRING            # random code
        self.frontID = EMPTY_INT                # front server ID
        self.sessionID = EMPTY_INT              # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器连接成功'
self.gateway.onLog(log)
# 前置机连接后,请求随机码
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.qryConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器登录完成'
self.gateway.onLog(log)
# 查询合约代码
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
# 否则,推送错误信息
else:
err = VtErrorData()
            err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.qryConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""合约查询回报"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
contract.name = data['InstrumentName'].decode('GBK')
# 合约数值
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['ExecPrice']
contract.underlyingSymbol = data['MarketID']
# 合约类型
if data['ProductClass'] == '1':
contract.productClass = PRODUCT_FUTURES
elif data['ProductClass'] == '2':
contract.productClass = PRODUCT_OPTION
elif data['ProductClass'] == '3':
contract.productClass = PRODUCT_COMBINATION
elif data['ProductClass'] == '6':
contract.productClass = PRODUCT_EQUITY
elif data['ProductClass'] == '8':
contract.productClass = PRODUCT_EQUITY
else:
print data['ProductClass']
# 期权类型
if data['InstrumentType'] == '1':
contract.optionType = OPTION_CALL
elif data['InstrumentType'] == '2':
contract.optionType = OPTION_PUT
# 推送
self.gateway.onContract(contract)
if last:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易合约信息获取完成'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
"""投资者查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
account = VtAccountData()
account.gatewayName = self.gatewayName
# 账户代码
account.accountID = data['AccountID']
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
# 数值相关
account.preBalance = data['PreBalance']
account.available = data['Available']
account.commission = data['Commission']
account.margin = data['CurrMargin']
#account.closeProfit = data['CloseProfit']
#account.positionProfit = data['PositionProfit']
# 这里的balance和快期中的账户不确定是否一样,需要测试
account.balance = data['Balance']
# 推送
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onRspQryBondInterest(self, data, error, n, last):
"""债券利息查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryMarketRationInfo(self, data, error, n, last):
"""市值配售查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
"""合约手续费查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFInstrument(self, data, error, n, last):
"""ETF基金查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFBasket(self, data, error, n, last):
"""ETF股票篮查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryOFInstrument(self, data, error, n, last):
"""OF合约查询回报"""
pass
#----------------------------------------------------------------------
def onRspQrySFInstrument(self, data, error, n, last):
"""SF合约查询回报"""
event1 = Event(type_=EVENT_LTS_SF)
event1.dict_['data'] = data
self.gateway.eventEngine.put(event1)
symbol = data['InstrumentID']
exchange = exchangeMapReverse[data['ExchangeID']]
vtSymbol = '.'.join([symbol, exchange])
event2 = Event(type_=EVENT_LTS_SF + vtSymbol)
event2.dict_['data'] = data
self.gateway.eventEngine.put(event2)
#----------------------------------------------------------------------
def onRspQryInstrumentUnitMargin(self, data, error, n, last):
"""查询单手保证金"""
pass
#----------------------------------------------------------------------
def onRspQryPreDelivInfo(self, data, error, n , last):
"""查询预交割信息"""
pass
#----------------------------------------------------------------------
def onRsyQryCreditStockAssignInfo(self, data, error, n, last):
"""查询可融券分配"""
pass
#----------------------------------------------------------------------
def onRspQryCreditCashAssignInfo(self, data, error, n , last):
"""查询可融资分配"""
pass
#----------------------------------------------------------------------
def onRsyQryConversionRate(self, data, error, n, last):
"""查询证券这算率"""
pass
#----------------------------------------------------------------------
def onRspQryHisCreditDebtInfo(self,data, error, n, last):
"""查询历史信用负债"""
pass
#----------------------------------------------------------------------
def onRspQryMarketDataStaticInfo(self, data, error, n, last):
"""查询行情静态信息"""
pass
#----------------------------------------------------------------------
def onRspQryExpireRepurchInfo(self, data, error, n, last):
"""查询到期回购信息响应"""
pass
#----------------------------------------------------------------------
def onRspQryBondPledgeRate(self, data, error, n, last):
"""查询债券质押为标准券比例"""
pass
#----------------------------------------------------------------------
def onRspQryPledgeBond(self, data, error, n, last):
"""查询债券质押代码对照关系"""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
pos = VtPositionData()
pos.gatewayName = self.gatewayName
# 保存代码
pos.symbol = data['InstrumentID']
pos.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
# 方向和持仓冻结数量
pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
if pos.direction == DIRECTION_NET or pos.direction == DIRECTION_LONG:
pos.frozen = data['LongFrozen']
elif pos.direction == DIRECTION_SHORT:
pos.frozen = data['ShortFrozen']
# 持仓量
pos.position = data['Position']
pos.ydPosition = data['YdPosition']
# 持仓均价
if pos.position:
pos.price = data['PositionCost'] / pos.position
# VT系统持仓名
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
# 推送
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def OnRspQryFundTransferSerial(self, data, error, n, last):
"""资金转账查询"""
pass
#----------------------------------------------------------------------
def onRspQryFundInterTransferSerial(self, data, error,n, last):
"""资金内转流水查询"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcQueryApi(path)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
self.reqID += 1
        # do we need InvestorID and BrokerID here?
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryTradingAccount(req, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
| python |
from django.apps import AppConfig
class KoperationConfig(AppConfig):
name = 'koperation'
| python |
from scraper.scraper import Scraper
from scraper.template import Template
def start_scraping():
job_name = input('Enter job name: ')
place = input('Enter place: ')
radius = int(input('Enter radius: '))
scraper = Scraper(job_name, place, radius)
    print(f'URL: {scraper.page.url}, Place: {scraper.location}, '
          f'Job name: {scraper.job_name}\n')
template = Template(scraper.offers, scraper.number_of_offers)
if __name__ == '__main__':
start_scraping()
| python |
class Initializer:
def __init__(self, interval):
self.interval = interval
| python |
from django.apps import AppConfig
class RatingsConfig(AppConfig):
name = 'authors.apps.ratings'
| python |
import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt.modules
class Encoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
inputSize = opt.word_vec_size
super(Encoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(),
opt.word_vec_size,
padding_idx=onmt.Constants.PAD)
self.rnn = nn.LSTM(inputSize, self.hidden_size,
num_layers=opt.layers,
dropout=opt.dropout,
bidirectional=opt.brnn)
# self.rnn.bias_ih_l0.data.div_(2)
# self.rnn.bias_hh_l0.data.copy_(self.rnn.bias_ih_l0.data)
if opt.pre_word_vecs_enc is not None:
pretrained = torch.load(opt.pre_word_vecs_enc)
self.word_lut.weight.copy_(pretrained)
def forward(self, input, hidden=None):
batch_size = input.size(0) # batch first for multi-gpu compatibility
emb = self.word_lut(input).transpose(0, 1)
if hidden is None:
h_size = (self.layers * self.num_directions, batch_size, self.hidden_size)
h_0 = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
c_0 = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
hidden = (h_0, c_0)
outputs, hidden_t = self.rnn(emb, hidden)
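        # (added note) outputs: (seq_len, batch, hidden_size * num_directions);
        # hidden_t: tuple (h_n, c_n), each (layers * num_directions, batch, hidden_size)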
return hidden_t, outputs
class StackedLSTM(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
for i in range(num_layers):
layer = nn.LSTMCell(input_size, rnn_size)
self.add_module('layer_%d' % i, layer)
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i in range(self.num_layers):
layer = getattr(self, 'layer_%d' % i)
h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))
input = h_1_i
            if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return input, (h_1, c_1)
class Decoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.input_feed = opt.input_feed
input_size = opt.word_vec_size
if self.input_feed:
input_size += opt.rnn_size
super(Decoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(),
opt.word_vec_size,
padding_idx=onmt.Constants.PAD)
self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
self.attn = onmt.modules.GlobalAttention(opt.rnn_size)
self.dropout = nn.Dropout(opt.dropout)
# self.rnn.bias_ih.data.div_(2)
# self.rnn.bias_hh.data.copy_(self.rnn.bias_ih.data)
self.hidden_size = opt.rnn_size
        if opt.pre_word_vecs_dec is not None:
pretrained = torch.load(opt.pre_word_vecs_dec)
self.word_lut.weight.copy_(pretrained)
def forward(self, input, hidden, context, init_output):
emb = self.word_lut(input).transpose(0, 1)
batch_size = input.size(0)
h_size = (batch_size, self.hidden_size)
output = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
# n.b. you can increase performance if you compute W_ih * x for all
# iterations in parallel, but that's only possible if
# self.input_feed=False
outputs = []
output = init_output
for i, emb_t in enumerate(emb.chunk(emb.size(0), dim=0)):
emb_t = emb_t.squeeze(0)
if self.input_feed:
emb_t = torch.cat([emb_t, output], 1)
output, h = self.rnn(emb_t, hidden)
output, attn = self.attn(output, context.t())
output = self.dropout(output)
outputs += [output]
outputs = torch.stack(outputs)
return outputs.transpose(0, 1), h, attn
class NMTModel(nn.Module):
def __init__(self, encoder, decoder, generator):
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
self.generate = False
def set_generate(self, enabled):
self.generate = enabled
def make_init_decoder_output(self, context):
batch_size = context.size(1)
h_size = (batch_size, self.decoder.hidden_size)
return Variable(context.data.new(*h_size).zero_(), requires_grad=False)
def _fix_enc_hidden(self, h):
# the encoder hidden is (layers*directions) x batch x dim
# we need to convert it to layers x batch x (directions*dim)
if self.encoder.num_directions == 2:
return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \
.transpose(1, 2).contiguous() \
.view(h.size(0) // 2, h.size(1), h.size(2) * 2)
else:
return h
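    # Shape walk-through (illustrative, assuming layers=2, brnn=True, batch=4, dim=256):
    #   h: (4, 4, 256) -> view (2, 2, 4, 256) -> transpose(1, 2) (2, 4, 2, 256)
    #   -> view (2, 4, 512), i.e. layers x batch x (directions * dim)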
def forward(self, input):
src = input[0]
tgt = input[1][:, :-1] # exclude last target from inputs
enc_hidden, context = self.encoder(src)
init_output = self.make_init_decoder_output(context)
enc_hidden = (self._fix_enc_hidden(enc_hidden[0]),
self._fix_enc_hidden(enc_hidden[1]))
out, dec_hidden, _attn = self.decoder(tgt, enc_hidden, context, init_output)
if self.generate:
out = self.generator(out)
return out
| python |
import torch.utils.data as data
from torchvision import transforms
from .cifar import CorruptionDataset, cifar_transform, imagenet_transform
from .visda import VisDaTest, visda_test_transforms
from .adversarial import ImagenetAdversarial, imageneta_transforms
from .randaugment import RandAugment
from .augmix import AugMix
class WrapperDataset(data.Dataset):
def __init__(self, dataset, augmentations, transforms=None, multi_out=True):
super().__init__()
self.dataset = dataset
self.transforms = transforms
self.augmentations = augmentations if transforms else lambda *args: augmentations(args[0])
self.multi_out = multi_out
def __getitem__(self, index):
x, y = self.dataset[index]
if self.multi_out:
im_tuple = (self.transforms(x), self.augmentations(x), self.augmentations(x))
else:
im_tuple = (self.augmentations(x), )
return im_tuple, y
def __len__(self):
return len(self.dataset)
def get_dataset(dataset, augmentation, corruption=None, level=None, **aug_args):
if dataset == 'visda':
dataset = VisDaTest()
transform = visda_test_transforms
elif dataset in ['imagenet', 'cifar100', 'cifar10']:
transform = imagenet_transform if dataset == 'imagenet' else cifar_transform
dataset = CorruptionDataset(dataset, corruption=corruption, level=level)
elif dataset == 'imageneta':
transform = imageneta_transforms
dataset = ImagenetAdversarial()
if augmentation.lower() == 'randaugment':
augmentation = transforms.Compose([RandAugment(**aug_args), transform])
elif augmentation.lower() == 'augmix':
augmentation = AugMix(base_transforms=transform, **aug_args)
return WrapperDataset(dataset, augmentations=augmentation, transforms=transform)
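# A minimal usage sketch (hypothetical arguments; the accepted corruption names and
# the RandAugment/AugMix keyword arguments depend on the local dataset/augmentation modules):
#
#   ds = get_dataset("cifar10", "randaugment", corruption="gaussian_noise", level=3)
#   loader = data.DataLoader(ds, batch_size=64, shuffle=False)
#   im_tuple, y = ds[0]   # (clean, aug1, aug2) views when multi_out=True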
| python |
# -*- coding: utf-8 -*-
"""
Python Collection Of Functions.
Package with collection of small useful functions.
Bytes calculator
"""
def bytes2human(size, *, unit="", precision=2, base=1024):
"""
Convert number in bytes to human format.
Arguments:
size (int): bytes to be converted
Keyword arguments (opt):
unit (str): If it will convert bytes to a specific unit
'KB', 'MB', 'GB', 'TB', 'PB', 'EB'
precision (int): number of digits after the decimal point
base (int): 1000 - for decimal base
1024 - for binary base (it is the default)
Returns:
(int): number
        (str): unit ('Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB')
Example:
>>> bytes2human(10)
('10.00', 'Bytes')
>>> bytes2human(2048)
('2.00', 'KB')
>>> bytes2human(27273042329)
('25.40', 'GB')
>>> bytes2human(27273042329, precision=1)
('25.4', 'GB')
>>> bytes2human(27273042329, unit='MB')
('26009.60', 'MB')
"""
# validate parameters
if not isinstance(precision, int):
raise ValueError("precision is not a number")
if not isinstance(base, int):
raise ValueError("base is not a number")
try:
num = float(size)
except ValueError:
raise ValueError("value is not a number")
suffix = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]
# If it needs to convert bytes to a specific unit
if unit:
try:
num = num / base ** suffix.index(unit)
except ValueError:
raise ValueError("Error: unit must be {}".format(", ".join(suffix[1:])))
return "{0:.{prec}f}".format(num, prec=precision), unit
    # Calculate the greatest unit for that size
for counter, suffix_unit in enumerate(suffix):
if num < base:
return "{0:.{prec}f}".format(num, prec=precision), suffix_unit
if counter == len(suffix) - 1:
raise ValueError("value greater than the highest unit")
num /= base
def human2bytes(size, unit, *, precision=2, base=1024):
"""
Convert size from human to bytes.
Arguments:
size (int): number
unit (str): converts from this unit to bytes
'KB', 'MB', 'GB', 'TB', 'PB', 'EB'
Keyword arguments (opt):
precision (int): number of digits after the decimal point
default is 2
base (int): 1000 - for decimal base
1024 - for binary base (it is the default)
Returns:
(int) number in bytes
Example:
>>> human2bytes(10, 'GB')
'10737418240.00'
>>> human2bytes(10, 'GB', precision=0)
'10737418240'
>>> human2bytes(10, 'PB')
'11258999068426240.00'
"""
dic_power = {
"KB": base,
"MB": base ** 2,
"GB": base ** 3,
"TB": base ** 4,
"PB": base ** 5,
"EB": base ** 6,
"ZB": base ** 7,
}
if unit not in dic_power:
raise ValueError(
"invalid unit. It must be {}".format(", ".join(dic_power.keys()))
)
try:
num_bytes = float(size) * int(dic_power[unit])
except ValueError:
raise ValueError("value is not a number")
return "{0:.{prec}f}".format(num_bytes, prec=precision)
def bandwidth_converter(
number, *, from_unit, to_unit, from_time="seconds", to_time="seconds"
):
"""
Bandwidth Calculator.
Convert data rate from one unit to another.
Arguments:
number (int): number to be converted
Keyword arguments:
from_unit (str): convert from this data unit. Example:
(bps, Kbps, Mbps, Gbps... KB, KiB, MB, MiB...)
to_unit (str): convert to this data unit. Example:
(bps, Kbps, Mbps, Gbps... KB, KiB, MB, MiB...)
Keyword arguments (opt):
from_time (str): Specify the time frame used in from_unit
(seconds, minutes, hours, days, months)
default: seconds
to_time (str): Specify the time frame used in to_unit
(seconds, minutes, hours, days, months)
default: seconds
bps, Kbps, Mbps, Gbps... = decimal base = 1000^n
KB, MB, GB, TB... = decimal base = 1000^n
KiB, MiB, GiB, TiB... = binary base = 1024^n
References:
- https://en.wikipedia.org/wiki/Units_of_information
- https://physics.nist.gov/cuu/Units/binary.html
Returns: tuple
(number_converted, to_unit/to_time)
Example:
>>> bandwidth_converter(100, from_unit="Mbps", to_unit="MB")
(12.5, 'MB/seconds')
>>> bandwidth_converter(100, from_unit="Mbps", to_unit="GB", to_time="hours")
(45.0, 'GB/hours')
>>> bandwidth_converter(1, from_unit="Gbps", to_unit="MB")
(125.0, 'MB/seconds')
>>> bandwidth_converter(10, from_unit="Gbps", to_unit="GB")
(1.25, 'GB/seconds')
>>> bandwidth_converter(10, from_unit="Gbps", to_unit="TB", to_time="hours")
(4.5, 'TB/hours')
>>> bandwidth_converter(10, from_unit="GB", to_unit="Gbps")
(80.0, 'Gbps/seconds')
>>> Convert 2.25 GB per hours to Mbps # doctest: +SKIP
>>> bandwidth_converter(2.25, from_unit="GB", from_time="hours", to_unit="Mbps", to_time="seconds") # noqa
(5.0, 'Mbps/seconds')
"""
unit_power = {
"bps": 1,
"Kbps": 1000,
"Mbps": 1000 ** 2,
"Gbps": 1000 ** 3,
"Tbps": 1000 ** 4,
"Pbps": 1000 ** 5,
"Ebps": 1000 ** 6,
"Bytes": 1,
"KB": 1000,
"MB": 1000 ** 2,
"GB": 1000 ** 3,
"TB": 1000 ** 4,
"PB": 1000 ** 5,
"EB": 1000 ** 6,
"KiB": 1024,
"MiB": 1024 ** 2,
"GiB": 1024 ** 3,
"TiB": 1024 ** 4,
"PiB": 1024 ** 5,
"EiB": 1024 ** 6,
}
time_in_sec = {
"seconds": 1,
"minutes": 60,
"hours": 3600,
"days": 3600 * 24,
"months": 3600 * 24 * 30,
}
if from_unit not in unit_power or to_unit not in unit_power:
raise ValueError(
"invalid unit. It must be {}".format(", ".join(unit_power.keys()))
)
if from_time not in time_in_sec or to_time not in time_in_sec:
raise ValueError(
"invalid time. It must be {}".format(", ".join(time_in_sec.keys()))
)
# Convert input number to bps
bps = (float(number) * int(unit_power[from_unit])) / time_in_sec[from_time]
if not from_unit.endswith("bps"):
bps = bps * 8
# to_unit is bits or bytes
new_unit = bps if to_unit.endswith("bps") else bps / 8
# Convert to new unit
new_unit = (new_unit / unit_power[to_unit]) * time_in_sec[to_time]
return new_unit, "{}/{}".format(to_unit, to_time)
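if __name__ == "__main__":
    # A minimal self-check sketch: run the usage examples embedded in the
    # docstrings above through doctest (no extra arguments assumed).
    import doctest
    doctest.testmod()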
# vim: ts=4
| python |
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib
PLOT_TYPE_TEXT = False # For indices
PLOT_VECTORS = True # For original features in P.C.-Space
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
# This function will project the original feature onto the principal component feature-space,
# Scaling the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
# Sorting each column by its length.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print "Projected Features by importance:\n", important_features
ax = plt.axes()
for i in range(num_columns):
# Using an arrow to project each original feature as a
# labeled vector on the principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import PCA
import sklearn
    print(sklearn.__version__)
model = PCA(n_components=dimensions, svd_solver='randomized', random_state=7)
model.fit(data)
return model
def doKMeans(data, clusters=0):
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = clusters)
kmeans.fit(data)
model = kmeans.predict(data)
model = kmeans
return model.cluster_centers_, model.labels_
import os
os.chdir("Datasets")
df = pd.read_csv("Wholesale customers data.csv", sep=',', header = 0)
# Setting Nans to 0
df = df.fillna(0)
df.drop(['Channel','Region'], axis = 1, inplace = True)
df.plot.hist()
# Removing top 5 and bottom 5 samples for each column to reduce big gaps
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
# Top 5
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
#
# Dropping rows by index.
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
#
# Un-commenting one line at a time before running the code
T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
T = df # No Change
# KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
#
# Printing out the centroids.
print(centroids)
# Projecting the centroids and samples into the new 2D feature space
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualizing all the samples and giving them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
# Plotting the index of the sample
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plotting a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plotting the Centroids as X's
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Displaying the feature vectors
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Adding the cluster label back into the dataframe
df['label'] = pd.Series(labels, index=df.index)
print(df)
plt.show()
| python |
from phenotype.Core.Auxiliary import (
__apply__,
__identity__,
)
def Lookup(key_func=__identity__,val_func=__identity__): return __apply__(key_func,val_func)
class Hasher(dict):
''' '''
__key_value_function__ = Lookup(id)
__key__ = id
@classmethod
def __key_value__(cls, item):
''' '''
return cls.__key_value_function__(item)
def __init__(self, *items):
''' '''
super().__init__( map( self.__key_value_function__, items ) )
    def __len__(self):
        ''' '''
        return super().__len__()
    def __contains__(self, item):
        ''' '''
        return super().__contains__(self.__key__(item))
    def __iter__(self):
        ''' '''
        yield from dict.items(self)
def __getitem__(self, item):
''' '''
hashed = self.__key__(item)
return self.get(hashed,None)
    def __call__(self, item):
        ''' '''
        hashed = self.__key__(item)
        dict.__setitem__(self, hashed, item)  # entries live on the dict itself
        return hashed
| python |
import math
import unittest
from typing import *
import mock
import pytest
import tensorkit as tk
from tensorkit import tensor as T
from tensorkit.distributions import Categorical, FlowDistribution, UnitNormal
from tensorkit.distributions.utils import copy_distribution
from tensorkit.flows import ReshapeFlow, ActNorm
from tensorkit.tensor import Tensor, float_scalar_like, int_range
from tests.helper import *
class _MyFlow(tk.flows.Flow):
def _transform(self,
input: Tensor,
input_log_det: Optional[Tensor],
inverse: bool,
compute_log_det: bool
) -> Tuple[Tensor, Optional[Tensor]]:
if inverse:
output = input * 2.0 + 1
event_ndims = self.x_event_ndims
else:
output = (input - 1.0) * 0.5
event_ndims = self.y_event_ndims
if compute_log_det:
if inverse:
output_log_det = float_scalar_like(-math.log(2.), output)
else:
output_log_det = float_scalar_like(math.log(2.), output)
for axis in int_range(-event_ndims, 0):
output_log_det = output_log_det * output.shape[axis]
if input_log_det is not None:
output_log_det = output_log_det + input_log_det
else:
output_log_det: Optional[Tensor] = None
return output, output_log_det
def check_flow_distribution(ctx,
distribution,
flow):
min_event_ndims = flow.get_y_event_ndims()
max_event_ndims = (distribution.value_ndims +
(flow.get_y_event_ndims() - flow.get_x_event_ndims()))
def fn(event_ndims, reparameterized, validate_tensors):
# construct the instance
kwargs = {}
if reparameterized is not None:
kwargs['reparameterized'] = reparameterized
else:
reparameterized = distribution.reparameterized
if event_ndims is not None:
kwargs['event_ndims'] = event_ndims
else:
event_ndims = flow.get_y_event_ndims()
if validate_tensors is not None:
kwargs['validate_tensors'] = validate_tensors
else:
validate_tensors = distribution.validate_tensors
d = FlowDistribution(distribution, flow, **kwargs)
# check the instance
def log_prob_fn(t):
log_px = distribution.log_prob(t.transform_origin.tensor,
group_ndims=0)
y, log_det = flow(t.transform_origin.tensor) # y and log |dy/dx|
assert_allclose(y, t.tensor, atol=1e-4, rtol=1e-6)
ctx.assertEqual(
T.rank(log_det),
T.rank(log_px) - (flow.get_x_event_ndims() - distribution.event_ndims)
)
return -log_det + T.reduce_sum(
log_px, T.int_range(
-(flow.get_x_event_ndims() - distribution.event_ndims),
0
)
)
check_distribution_instance(
ctx=ctx,
d=d,
event_ndims=event_ndims,
batch_shape=distribution.batch_shape[: max_event_ndims - event_ndims],
min_event_ndims=min_event_ndims,
max_event_ndims=max_event_ndims,
log_prob_fn=log_prob_fn,
transform_origin_distribution=distribution,
transform_origin_group_ndims=flow.get_x_event_ndims() - distribution.event_ndims,
# other attributes
base_distribution=distribution,
flow=flow,
dtype=distribution.dtype,
continuous=distribution.continuous,
reparameterized=reparameterized,
validate_tensors=validate_tensors,
)
for event_ndims in (None,
min_event_ndims,
(min_event_ndims + max_event_ndims) // 2,
max_event_ndims):
fn(event_ndims, None, None)
for reparameterized in (None, True, False):
fn(None, reparameterized, None)
for validate_tensors in (None, True, False):
fn(None, None, validate_tensors)
class FlowDistributionTestCase(TestCase):
def test_FlowDistribution(self):
check_flow_distribution(
self,
UnitNormal([], event_ndims=0),
_MyFlow(x_event_ndims=0, y_event_ndims=0, explicitly_invertible=True),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=0),
_MyFlow(x_event_ndims=0, y_event_ndims=0, explicitly_invertible=True),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=0),
ActNorm(4),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=1),
ReshapeFlow([-1], [-1, 1]),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=1),
ReshapeFlow([-1, 1], [-1]),
)
# errors in constructor
with pytest.raises(TypeError,
match='`distribution` is not an instance of '
'`Distribution`'):
_ = FlowDistribution(object(), ActNorm(3))
with pytest.raises(TypeError, match='`flow` is not a flow'):
_ = FlowDistribution(UnitNormal([3]), object())
with pytest.raises(ValueError,
match='cannot be transformed by a flow, because '
'it is not continuous'):
_ = FlowDistribution(Categorical(logits=[0., 1., 2.]), ActNorm(3))
with pytest.raises(ValueError,
match='cannot be transformed by a flow, because '
'its `dtype` is not floating point'):
normal = UnitNormal([3])
normal.dtype = T.int32
_ = FlowDistribution(normal, ActNorm(3))
with pytest.raises(ValueError,
match='`distribution.event_ndims <= flow.'
'x_event_ndims <= distribution.value_ndims` '
'is not satisfied'):
_ = FlowDistribution(UnitNormal([2, 3, 4], event_ndims=2),
ActNorm(4))
with pytest.raises(ValueError,
match='`distribution.event_ndims <= flow.'
'x_event_ndims <= distribution.value_ndims` '
'is not satisfied'):
_ = FlowDistribution(UnitNormal([2, 3, 4], event_ndims=2),
_MyFlow(x_event_ndims=4, y_event_ndims=4,
explicitly_invertible=True))
with pytest.raises(ValueError,
match='`event_ndims` out of range: .* '
'minimum allowed value is 2, .* '
'maximum allowed value is 4'):
_ = FlowDistribution(
UnitNormal([2, 3, 4]), ReshapeFlow([-1], [-1, 1]), event_ndims=1)
with pytest.raises(ValueError,
match='`event_ndims` out of range: .* '
'minimum allowed value is 2, .* '
'maximum allowed value is 4'):
_ = FlowDistribution(
UnitNormal([2, 3, 4]), ReshapeFlow([-1], [-1, 1]), event_ndims=5)
def test_copy(self):
normal = UnitNormal([2, 3, 5], dtype=T.float64, validate_tensors=True)
flow = ActNorm(5)
distrib = FlowDistribution(normal, flow)
self.assertEqual(distrib.event_ndims, 1)
self.assertTrue(distrib.reparameterized)
self.assertTrue(distrib.validate_tensors)
with mock.patch('tensorkit.distributions.flow.copy_distribution',
wraps=copy_distribution) as f_copy:
distrib2 = distrib.copy(event_ndims=2, reparameterized=False,
validate_tensors=False)
self.assertIsInstance(distrib2, FlowDistribution)
self.assertIs(distrib2.flow, flow)
self.assertIsInstance(distrib2.base_distribution, UnitNormal)
self.assertEqual(distrib2.reparameterized, False)
self.assertEqual(distrib2.event_ndims, 2)
self.assertFalse(distrib2.validate_tensors)
self.assertEqual(f_copy.call_args, ((), {
'cls': FlowDistribution,
'base': distrib,
'attrs': (('distribution', '_base_distribution'), 'flow',
'reparameterized', 'event_ndims', 'validate_tensors'),
'overrided_params': {'event_ndims': 2,
'reparameterized': False,
'validate_tensors': False},
}))
| python |
import toml
import argparse
import numpy as np
from scipy.stats import entropy
from pom import POM
from sample_script import get_points_covered_by_lidar_config
def evaluate(map, pom_params, lidar_params, config):
points = get_points_covered_by_lidar_config(
pom_params, lidar_params, config, lidar_params['lidar_nos']
)
H_entropy = 0.0 # -plogp-(1-p)log(1-p)
total_entropy = 0.0
for x in map:
for xy in x:
for xyz in xy:
if xyz == 0.0 or xyz == 1:
continue
total_entropy += entropy([1 - xyz, xyz])
for point in range(len(points)):
p = map[points[point]]
if p == 0.0 or p == 1:
continue
H_entropy += entropy([1-p,p])
return H_entropy, total_entropy, total_entropy - H_entropy
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p','--params', type=str, default="multihyper.toml", help="Params")
parser.add_argument('-c','--configuration', type=str, default="config.toml", help="Configuration")
args = parser.parse_args()
params = toml.load(args.params)
configs = toml.load(args.configuration)['config']
pom_car, num_valid_frames_car = POM(
random=True, pom_params=params["pom"], lidar_params=params["lidar"]
).create_data_from_logs(
"./routes/square/vehicle"
)
print(111)
pom_car = pom_car.astype(float) / num_valid_frames_car
pom_ped, num_valid_frames_ped = POM(
random=True, pom_params=params["pom"], lidar_params=params["lidar"]
).create_data_from_logs(
"./routes/square/pedestrian"
)
pom_ped = pom_ped.astype(float) / num_valid_frames_ped
pom_cyc, num_valid_frames_cyc = POM(
random=True, pom_params=params["pom"], lidar_params=params["lidar"]
).create_data_from_logs(
"./routes/square/cyclist"
)
pom_cyc = pom_cyc.astype(float) / num_valid_frames_cyc
type = ['square', 'center', 'line', 'pyramid', 'trapezoid', 'line_roll', 'pyramid_roll',
'pyramid_pitch']
pom_list = [('car', pom_car), ('ped', pom_ped), ('cyc', pom_cyc)]
for key, config in configs.items():
for pom in pom_list:
H_entropy, total_entropy, IG = evaluate(pom[1], params['pom'], params['lidar'], config)
print(
f"Key {type[int(key)]}, {pom[0]}: H_entropy {H_entropy}, total_entropy {total_entropy}, IG {IG}") | python |
from cubelang.actions import Action
from cubelang.cube import Cube
from cubelang.orientation import Orientation, Side, Color
from cubelang.cli.cube_builder import apply_side, CubeBuilder
from pytest import raises
from unittest import mock
import pytest
import string
import argparse
from typing import List
class TestApplySide:
orientation = Orientation(Side.RIGHT, Side.BOTTOM)
def test_apply_side(self):
cube = Cube((2, 2, 2))
colors = [[Color.WHITE, Color.RED], [Color.ORANGE, Color.GREEN]]
apply_side(cube, self.orientation, colors)
actual_colors = [[cube.get_side(self.orientation).colors[i, j] for j in [0, 1]] for i in [0, 1]]
assert colors == actual_colors
def test_wrong_columns(self):
cube = Cube((2, 2, 2))
colors = [[Color.WHITE, Color.RED, Color.BLUE], [Color.ORANGE, Color.GREEN, Color.BLUE]]
with raises(argparse.ArgumentTypeError) as e:
apply_side(cube, self.orientation, colors)
assert str(e.value) == "Incorrect number of columns"
def test_wrong_lines(self):
cube = Cube((2, 2, 2))
colors = [[Color.WHITE, Color.RED]]
with raises(argparse.ArgumentTypeError) as e:
apply_side(cube, self.orientation, colors)
assert str(e.value) == "Incorrect number of lines"
class MockAction (Action):
def __init__(self, results: List[str], name: str):
self.results = results
self.name = name
def perform(self, cube: Cube, orientation: Orientation) -> Orientation:
self.results.append(self.name)
return Orientation(Side.LEFT, Side.RIGHT)
class TestBuilder:
def test_create(self):
builder = CubeBuilder((2, 2, 2))
cube, orientation = builder.get()
assert cube.shape == (2, 2, 2)
assert orientation.top == Side.TOP
assert orientation.front == Side.FRONT
@mock.patch("cubelang.cli.cube_builder.apply_side")
@pytest.mark.parametrize("side, exp_orientation", [
(Side.FRONT, Orientation(Side.FRONT, Side.TOP)),
(Side.LEFT, Orientation(Side.LEFT, Side.TOP)),
(Side.RIGHT, Orientation(Side.RIGHT, Side.TOP)),
(Side.BACK, Orientation(Side.BACK, Side.TOP)),
(Side.TOP, Orientation(Side.TOP, Side.BACK)),
(Side.BOTTOM, Orientation(Side.BOTTOM, Side.FRONT))
])
def test_side(self, apply_side_fn, side, exp_orientation):
builder = CubeBuilder((2, 2, 2))
builder.side(side, [])
apply_side_fn.assert_called_once_with(builder.cube, exp_orientation, [])
def test_scramble(self):
result = []
actions = [MockAction(result, string.ascii_uppercase[i]) for i in range(10)]
builder = CubeBuilder((2, 2, 2))
builder.scramble(actions)
_, orientation = builder.get()
assert orientation == Orientation(Side.LEFT, Side.RIGHT)
assert result == list("ABCDEFGHIJ")
| python |
"""
Thread lock - mutex
Why thread locks are needed: https://blog.csdn.net/JackLiu16/article/details/81267176
Analysis of mutex execution order: https://blog.csdn.net/weixin_40481076/article/details/101594705
"""
import threading,time
# Instantiate a mutex lock object
lock = threading.Lock()
def run():
    lock.acquire()  # acquire the lock
print(threading.current_thread().getName(),time.ctime())
time.sleep(5)
    lock.release()  # release the lock
for _ in range(10):
t = threading.Thread(target=run)
t.start()
| python |
import FWCore.ParameterSet.Config as cms
from RecoBTag.Skimming.btagMC_QCD_800_1000_cfi import *
btagMC_QCD_800_1000Path = cms.Path(btagMC_QCD_800_1000)
| python |
def getLocation(config):
config['serverType']="regularExperiment"
config['serverPort']=2345
config['webSocketPort']=3456
ip="localhost"
config["domain"]="http://"+ip+":"+str(config['serverPort'])
config["websocketURL"]="ws://"+ip+":"+str(config['webSocketPort'])
return config | python |
import torch.nn as nn
import torch
class Density(nn.Module):
def __init__(self, params_init={}):
super().__init__()
for p in params_init:
param = nn.Parameter(torch.tensor(params_init[p]))
setattr(self, p, param)
def forward(self, sdf, beta=None):
return self.density_func(sdf, beta=beta)
class LaplaceDensity(Density): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)
def __init__(self, params_init={}, beta_min=0.0001):
super().__init__(params_init=params_init)
self.beta_min = torch.tensor(beta_min).cuda()
def density_func(self, sdf, beta=None):
if beta is None:
beta = self.get_beta()
alpha = 1 / beta
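        # The parenthesised term below is Laplace(0, beta).cdf(-sdf):
        # 0.5*exp(-sdf/beta) for sdf >= 0 and 1 - 0.5*exp(sdf/beta) for sdf < 0;
        # expm1 keeps the expression numerically stable near sdf = 0.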
return alpha * (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() / beta))
def get_beta(self):
beta = self.beta.abs() + self.beta_min
return beta
class AbsDensity(Density): # like NeRF++
def density_func(self, sdf, beta=None):
return torch.abs(sdf)
class SimpleDensity(Density): # like NeRF
def __init__(self, params_init={}, noise_std=1.0):
super().__init__(params_init=params_init)
self.noise_std = noise_std
def density_func(self, sdf, beta=None):
if self.training and self.noise_std > 0.0:
noise = torch.randn(sdf.shape).cuda() * self.noise_std
sdf = sdf + noise
return torch.relu(sdf)
| python |
#!/usr/bin/env python3
import matplotlib.pylab as plt
import numpy as np
from astropy import units as u
from ctapipe.io import event_source
from ctapipe.utils import datasets
from ctapipe.visualization import ArrayDisplay
if __name__ == "__main__":
plt.figure(figsize=(9.5, 8.5))
# load up a single event, so we can get the subarray info:
source = event_source(
datasets.get_dataset_path("gamma_test_large.simtel.gz"), max_events=1,
)
event = next(iter(source))
# display the array
subarray = source.subarray
ad = ArrayDisplay(subarray, tel_scale=3.0)
print("Now setting vectors")
plt.pause(1.0)
plt.tight_layout()
for phi in np.linspace(0, 360, 30) * u.deg:
r = np.cos(phi / 2)
ad.set_vector_rho_phi(r, phi)
plt.pause(0.01)
ad.set_vector_rho_phi(0, 0 * u.deg)
plt.pause(1.0)
print("Now setting values")
ad.telescopes.set_linewidth(0)
for ii in range(50):
vals = np.random.uniform(100.0, size=subarray.num_tels)
ad.values = vals
plt.pause(0.01)
print("Setting labels")
for ii in range(3):
ad.add_labels()
plt.pause(0.5)
ad.remove_labels()
plt.pause(0.5)
| python |
lists = ['1', '2', '3']
print(lists[3])
| python |
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import scipy as sp
import numpy as np
import scipy.ndimage
from cyclic_gps.models import LEGFamily
from cyclic_gps.data_utils import time_series_dataset
import matplotlib.pyplot as plt
num_datapoints = 1000
DTYPE = torch.double
RANK = 5
MAX_EPOCHS = 800
OPTIMIZER = "ADAM" #or "ADAM" || "BFGS"
with open("../numpy_arrays/all_ts_2.npy", "rb") as f:
all_ts = np.load(f)
with open("../numpy_arrays/all_vals_2.npy", "rb") as f:
all_vals = np.load(f)
all_ts = torch.from_numpy(all_ts)
all_vals = torch.from_numpy(all_vals)
print(all_ts.shape)
print(all_vals.shape)
# create a torch dataset, and add a batch dim of zero
dataset = time_series_dataset(all_ts, all_vals)
example = dataset[0]
#print("example datatype: {}".format(example[0].dtype))
assert torch.allclose(example[0], all_ts.unsqueeze(0))
dl = DataLoader(dataset=dataset, batch_size=1)
leg_model = LEGFamily(rank=RANK, obs_dim=all_vals.shape[2], train=True, optimizer=OPTIMIZER, data_type=DTYPE)
leg_model.double()
trainer = pl.Trainer(max_epochs=MAX_EPOCHS)
trainer.fit(model=leg_model, train_dataloaders=dl)
#print(leg_model.G)
leg_model.register_model_matrices_from_params()
#print(leg_model.G)
PATH_TO_NPY = "../numpy_arrays/"
with open(PATH_TO_NPY + "sample3_ts_2.npy", "rb") as f:
sample3_ts = np.load(f)
with open(PATH_TO_NPY + "sample3_vals_2.npy", "rb") as f:
sample3_vals = np.load(f)
sample3_ts = torch.from_numpy(sample3_ts)
sample3_vals = torch.from_numpy(sample3_vals)
# sample3_ts_chopped = sample3_ts[:200]
# sample3_vals_chopped = sample3_vals[:200]
# forecast_times = sample3_ts[200:300]
sample3_ts_chopped = torch.cat([sample3_ts[:200], sample3_ts[-200:]], dim=0)
sample3_vals_chopped = torch.cat([sample3_vals[:200], sample3_vals[-200:]], dim=0)
print("sample_3 shapes: ts:{}, vals:{}".format(sample3_ts_chopped.shape, sample3_vals_chopped.shape))
with open(PATH_TO_NPY + "forecast_times_2.npy", "rb") as f:
forecast_times = np.load(f)
forecast_times = torch.from_numpy(forecast_times)
pred_means, pred_variances = leg_model.make_predictions(sample3_ts_chopped, sample3_vals_chopped, forecast_times)
#print("data type precision:{}".format(pred_means.dtype))
pred_means = pred_means.detach().numpy()
pred_variances = pred_variances.detach().numpy()
plt.scatter(sample3_ts_chopped, sample3_vals_chopped[:, 0], label='observed data')
plt.scatter(sample3_ts[200:-200], sample3_vals[200:-200][:, 0],label='censored data')
plt.plot(forecast_times, pred_means[:,0], 'C1', label='interpolation/forecasting')
plt.fill_between(forecast_times,
pred_means[:,0]+2*np.sqrt(pred_variances[:,0,0]),
pred_means[:,0]-2*np.sqrt(pred_variances[:,0,0]),
color='black',alpha=.5,label='Uncertainty')
plt.legend() #bbox_to_anchor=[1,1],fontsize=20
plt.show()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
20a.py
~~~~~~
Advent of Code 2017 - Day 20: Particle Swarm
Part One
Suddenly, the GPU contacts you, asking for help. Someone has asked it to
simulate too many particles, and it won't be able to finish them all in
time to render the next frame at this rate.
It transmits to you a buffer (your puzzle input) listing each particle in
order (starting with particle 0, then particle 1, particle 2, and so on).
For each particle, it provides the X, Y, and Z coordinates for the
particle's position (p), velocity (v), and acceleration (a), each in the
format <X,Y,Z>.
Each tick, all particles are updated simultaneously. A particle's
properties are updated in the following order:
- Increase the X velocity by the X acceleration.
- Increase the Y velocity by the Y acceleration.
- Increase the Z velocity by the Z acceleration.
- Increase the X position by the X velocity.
- Increase the Y position by the Y velocity.
- Increase the Z position by the Z velocity.
Because of seemingly tenuous rationale involving z-buffering, the GPU would
like to know which particle will stay closest to position <0,0,0> in the
long term. Measure this using the Manhattan distance, which in this
situation is simply the sum of the absolute values of a particle's X, Y,
and Z position.
For example, suppose you are only given two particles, both of which stay
entirely on the X-axis (for simplicity). Drawing the current states of
particles 0 and 1 (in that order) with an adjacent a number line and
diagram of current X positions (marked in parenthesis), the following would
take place:
p=< 3,0,0>, v=< 2,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=< 4,0,0>, v=< 0,0,0>, a=<-2,0,0> (0)(1)
p=< 4,0,0>, v=< 1,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=< 2,0,0>, v=<-2,0,0>, a=<-2,0,0> (1) (0)
p=< 4,0,0>, v=< 0,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=<-2,0,0>, v=<-4,0,0>, a=<-2,0,0> (1) (0)
p=< 3,0,0>, v=<-1,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=<-8,0,0>, v=<-6,0,0>, a=<-2,0,0> (0)
At this point, particle 1 will never be closer to <0,0,0> than particle 0,
and so, in the long run, particle 0 will stay closest.
Which particle will stay closest to position <0,0,0> in the long term?
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
from vector import Vector
class Particle(object):
def __init__(self, i, p, v, a):
self.i = i
self.p = p
self.v = v
self.a = a
def __iter__(self):
return self
def __next__(self):
self.update()
return self
    def ff(self, t):
        """Fast forward the position by t ticks."""
        # After t ticks: v_t = v + t*a and p_t = p + t*v + t*(t+1)/2 * a
        self.p = (t * (t + 1) // 2) * self.a + t * self.v + self.p
        self.v = t * self.a + self.v
def update(self):
"""Update positon according to acceleration and velocity vectors"""
self.v += self.a
self.p += self.v
def __abs__(self):
"""Return lenght of vector position"""
return abs(self.p)
def __repr__(self):
return f"id={self.i}, p={self.p}, v={self.v}, a={self.a}"
def solve(system):
"""Return ID of particle who stays the closest to <0,0,0> in the long term.
:system: particle initial system with position, velocity and acceleration
vectors
:returns: particle ID of the closest to <0,0,0> in the long term.
>>> solve('''p=<3,0,0>, v=<2,0,0>, a=<-1,0,0>
... p=<4,0,0>, v=<0,0,0>, a=<-2,0,0>''')
0
"""
particles = []
for i, line in enumerate(system.strip().split('\n')):
vectors = line.strip().split(', ')
p, v, a = (Vector(*map(int, v[3:-1].split(','))) for v in vectors)
particles.append(Particle(i, p, v, a))
t = 10000
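    # At large t the acceleration term (~ 0.5*a*t^2) dominates |p|, so a single
    # large fast-forward followed by sorting approximates the long-term ordering.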
for p in particles:
p.ff(t)
return sorted(particles, key=abs)[0].i
def main(argv):
    if len(argv) == 2:
        f = open(argv[1], 'r')
    else:
        sys.stderr.write('reading from stdin...\n')
        f = sys.stdin
print(solve(f.read()))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| python |
expected_output = {
"cos-interface-information": {
"interface-map": {
"i-logical-map": {
"cos-objects": {
"cos-object-index": ["9", "13"],
"cos-object-name": [
"dscp-ipv6-compatibility",
"ipprec-compatibility",
],
"cos-object-subtype": ["dscp-ipv6", "ip"],
"cos-object-type": ["Classifier", "Classifier"],
},
"i-logical-index": "335",
"i-logical-name": "ge-0/0/2.0",
},
"interface-congestion-notification-map": "Disabled",
"interface-exclude-queue-overhead-bytes": "disabled",
"interface-index": "150",
"interface-logical-interface-aggregate-statistics": "disabled",
"interface-name": "ge-0/0/2",
"interface-queues-in-use": "4",
"interface-queues-supported": "8",
"interface-shaping-rate": "1000000",
"scheduler-map-index": "2",
"scheduler-map-name": "<default>",
}
}
}
| python |
from django.conf.urls import include, url
from django.urls import path
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework.permissions import IsAuthenticated
from elvanto_sync import views_api as va
from elvanto_sync import views_buttons as vb
from elvanto_sync.mixins import LoginRequiredMixin
from elvanto_sync.models import ElvantoGroup, ElvantoPerson
from elvanto_sync.serializers import (ElvantoGroupSerializer, ElvantoPersonSerializer)
admin.autodiscover()
class RestrictedTemplateView(LoginRequiredMixin, TemplateView):
pass
auth_patterns = [
url(r'^auth/', include('allauth.urls')),
]
urls_basic = [
path(r'admin/', admin.site.urls),
url(r'^$', RestrictedTemplateView.as_view(template_name='elvanto_sync/index.html'), name='index'),
url(
r'^group/(?P<pk>[0-9]+)$',
RestrictedTemplateView.as_view(template_name='elvanto_sync/index.html'),
name='group'
)
]
urls_buttons = [
url(r'^buttons/update_global/$', vb.UpdateGlobal.as_view(), name='button_update_global'),
url(r'^buttons/update_local/$', vb.UpdateLocal.as_view(), name='button_update_local'),
url(r'^buttons/update_sync/$', vb.UpdateSync.as_view(), name='button_update_sync'),
url(r'^buttons/push_all/$', vb.PushAll.as_view(), name='button_push_all'),
url(r'^buttons/pull_all/$', vb.PullAll.as_view(), name='button_pull_all'),
url(r'^buttons/push_group/$', vb.PushGroup.as_view(), name='button_push_group'),
]
urls_api = [
# api
url(
r'^api/v1/elvanto/groups/$',
va.ApiCollection.as_view(
model_class=ElvantoGroup, serializer_class=ElvantoGroupSerializer, permission_classes=(IsAuthenticated, )
),
name='api_groups'
),
url(
r'^api/v1/elvanto/groups/(?P<pk>[0-9]+)$',
va.ApiMember.as_view(
model_class=ElvantoGroup,
serializer_class=ElvantoGroupSerializer,
permission_classes=(IsAuthenticated, ),
),
name='api_group'
),
url(
r'^api/v1/elvanto/people/$',
va.ApiCollection.as_view(
model_class=ElvantoPerson, serializer_class=ElvantoPersonSerializer, permission_classes=(IsAuthenticated, )
),
name='api_people'
),
]
urlpatterns = auth_patterns + urls_buttons + urls_api + urls_basic
| python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*- #
#
# Builds the GitHub Wiki documentation into a static HTML site.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script does the following to build the documentation:
# Pulls the latest changes from the GitHub Wiki repository
# Edits the MkDocs configuration file to include all the markdown files
# Creates an index.html file to have root redirected to a specific page
# Builds the static site using MkDocs
# REMOVES the root Documentation folder
# Copies the generate content into the root Documentation folder
#
from __future__ import unicode_literals, absolute_import
import os
import sys
import shutil
import subprocess
from tempfile import mkstemp
# mkdocs used only in the command line, imported just to ensure it's installed
try:
import mkdocs
except ImportError:
print("You need to have mkdocs installed !")
sys.exit(1)
# Path data
GITHUB_USER = "ngageoint"
WIKI_NAME = "scale.wiki"
GITHUB_WIKI_REPO = "github.com/%s/%s.git" % (GITHUB_USER, WIKI_NAME)
GIT_INIT_SCRIPT = 'setup_wiki_git.sh'
MKDOCS_FOLDER = "wiki"
THIS_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
MKDOCS_DIR = os.path.join(THIS_FILE_DIR, MKDOCS_FOLDER)
WIKI_DIR = os.path.join(MKDOCS_DIR, WIKI_NAME)
GIT_INIT_FILE = os.path.join(WIKI_DIR, GIT_INIT_SCRIPT)
DEFAULT_INDEX = 'Home'
def pull_wiki_repo():
"""
Pulls latest changes from the wiki repo.
:return: Boolean indicating if the operation was successful.
"""
# Set working directory to the wiki repository
wiki_folder = os.path.join(MKDOCS_DIR, WIKI_NAME)
if os.path.isdir(wiki_folder):
os.chdir(wiki_folder)
else:
print("ERROR: Wiki repo directory is not correct: %s" % wiki_folder)
return False
# Init git in the wiki folder
subprocess.call(["sh", GIT_INIT_FILE])
# Ensure the submodule is initialised, progress is printed to stderr so just
# call subprocess with all data sent to console and error check later
subprocess.call(["git", "submodule", "update", "--init", "--recursive"])
# Ensure the subfolder selected is the correct repository
pipe = subprocess.PIPE
git_process = subprocess.Popen(
["git", "config", "--get", "remote.origin.url"],
stdout=pipe, stderr=pipe)
std_op, std_err_op = git_process.communicate()
if std_err_op:
print("ERROR: Could not get the remote information from the wiki "
"repository !\n%s" + std_err_op)
return False
if not GITHUB_WIKI_REPO in std_op:
print(("ERROR: Wiki repository:\n\t%s\n" % GITHUB_WIKI_REPO) +
"not found in directory %s url:\n\t%s\n" % (wiki_folder, std_op))
return False
# Git Fetch prints progress in stderr, so cannot check for erros that way
print("\nPull from Wiki repository...")
subprocess.call(["git", "pull", "origin", "master"])
print("")
return True
def edit_mkdocs_config():
"""
Edits the mkdocs.yml MkDocs configuration file to include all markdown
files as part of the documentation.
These files are created by default with the '.md' extension and it is
assumed no other file extensions are to be linked.
:return: Boolean indicating the success of the operation.
"""
path_list = []
for file in os.listdir(os.path.join(MKDOCS_DIR, WIKI_NAME)):
if file.endswith(".md"):
path_list.append("- '%s': '%s'" %
(file, file[:-3].replace("-", " ")))
if not path_list:
print(("ERROR: No markdown files found in %s ! " % MKDOCS_DIR) +
"Check if repository has been set up correctly.")
return False
pages_str = "pages:\n" + "\n".join(path_list) + "\n"
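    # Example of the generated block (hypothetical page names):
    #   pages:
    #   - 'Home.md': 'Home'
    #   - 'Getting-Started.md': 'Getting Started'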
# Replace the pages data, strategically located at the end of the file
mkdocs_yml = os.path.join(MKDOCS_DIR, "mkdocs.yml")
if not os.path.exists(mkdocs_yml):
print("ERROR: The MkDocs config file %s does not exist !" % mkdocs_yml)
return False
# Copy config file until the pages line, strategically located at the end
temp_file_handler, temp_abs_path = mkstemp()
with open(temp_abs_path, 'w') as temp_file:
with open(mkdocs_yml) as original_file:
for line in original_file:
if not "pages:" in line:
temp_file.write(line)
else:
print("Replacing 'pages' property found in mkdocs.yml ...")
break
else:
print("Did not find the 'pages' property in mkdocs.yml.\n" +
"Attaching the property at the end of the file.")
temp_file.write(pages_str)
print(pages_str)
# Remove original file and move the new temp to replace it
os.close(temp_file_handler)
try:
os.remove(mkdocs_yml)
except IOError:
print("ERROR: Could not delete original config file %s !" % mkdocs_yml)
return False
try:
shutil.move(temp_abs_path, mkdocs_yml)
except shutil.Error:
print("ERROR: Could move new config file to %s !" % mkdocs_yml)
return False
return True
def create_index():
"""
Creates an HTML index page to redirect to an MkDocs generated page.
:return: Boolean indicating the success of the operation.
"""
html_code = \
"<!DOCTYPE HTML>\n " \
"<html>\n" \
"\t<head>\n" \
"\t\t<meta charset=\"UTF-8\">\n" \
"\t\t<meta http-equiv=\"refresh\" content=\"1;url=%s/index.html\">\n" \
% DEFAULT_INDEX + \
"\t\t<script type=\"text/javascript\">\n" \
"\t\t\twindow.location.href = \"%s/index.html\"\n" % DEFAULT_INDEX +\
"\t\t</script>\n" \
"\t</head>\n" \
"\t<body>\n" \
"\t\tIf you are not redirected automatically to the " \
"%s page, follow this <a href=\"%s/index.html\">link</a>\n"\
% (DEFAULT_INDEX, DEFAULT_INDEX) + \
"\t</body>\n" \
"</html>\n"
print("Creating the index.html file...\n")
generated_site_dir = os.path.join(MKDOCS_DIR, "site")
if not os.path.exists(generated_site_dir):
try:
os.makedirs(generated_site_dir)
except IOError:
print("ERROR: Could not create site folder in %s !\n" %
generated_site_dir)
return False
try:
index_file = open(os.path.join(generated_site_dir, "index.html"), "w")
index_file.write(html_code)
index_file.close()
return True
except IOError:
print("ERROR: Could not create index.html file in %s !\n" %
generated_site_dir)
return False
def build_mkdocs():
"""
Invokes MkDocs to build the static documentation and moves the folder
into the project root folder.
:return: Boolean indicating the success of the operation.
"""
# Setting the working directory
if os.path.isdir(MKDOCS_DIR):
os.chdir(MKDOCS_DIR)
else:
print("ERROR: MkDocs directory is not correct: %s" % MKDOCS_DIR)
return False
# Building the MkDocs project
pipe = subprocess.PIPE
mkdocs_process = subprocess.Popen(
["mkdocs", "build"], stdout=pipe, stderr=pipe)
std_op, std_err_op = mkdocs_process.communicate()
if std_err_op:
print("ERROR: Could not build MkDocs !\n%s" %
std_err_op)
return False
else:
print(std_op)
# Remove root Documentation folder and copy the new site files into it
generated_site_dir = os.path.join(MKDOCS_DIR, "site")
root_documentation_dir = os.path.join(
os.path.dirname(THIS_FILE_DIR), "documentation")
print("Copy folder %s into %s ...\n" %
(generated_site_dir, root_documentation_dir))
if os.path.exists(root_documentation_dir):
try:
shutil.rmtree(root_documentation_dir)
except shutil.Error:
print("ERROR: Could not remove root documentation folder !")
return False
try:
shutil.move(generated_site_dir, root_documentation_dir)
except shutil.Error:
print("ERROR: Could move new documentation files from " +
"%s to %s !" % (generated_site_dir, root_documentation_dir))
return False
return True
def build_docs():
""" Builds the documentation HTML pages from the Wiki repository. """
success = pull_wiki_repo()
if success is False:
sys.exit(1)
success = edit_mkdocs_config()
if success is False:
sys.exit(1)
# Create index.html before the MkDocs site is created in case the project
# already contains an index file.
success = create_index()
if success is False:
sys.exit(1)
success = build_mkdocs()
if success is False:
sys.exit(1)
print("Build process finished!")
if __name__ == "__main__":
build_docs()
| python |
#---- Python VNF startup for ENCRYPT_2_to_1---
import SSL_listener
import SSL_writer
incomingIP="localhost"
incomingPort=10026
incomingPrivateKeyFile="server.key"
incomingPublicKeyFile="server.crt"
outgoingIP="localhost"
outgoingPort=10027
outgoingPublicKeyFile="server.crt"
def startENCRYPT_2_to_1():
ssl_writer=SSL_writer.SSL_writer(outgoingIP,outgoingPort, outgoingPublicKeyFile)
incoming_ssl_EncryptionVNF= SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile,ssl_writer)
| python |
from pymongo import MongoClient
class mongoRPSE:
mongos = ""
    # insert data
def insert_mongo_files(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_file_process.insert_one(data)
def insert_mongo_score(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_file_score.insert_one(data)
def inset_mongo_count(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_conteo.insert_one(data)
def update_mongo_score(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_file_process.update_one({"_id":data["_id"]},{'$set': data})
    # search for company
def find_diario_in_html(self, html):
diarios = self.findAllDiario()
data = "<meta name=\"url\" content=\"https://"
if(data in str(html).lower()):
for diario in diarios:
print("filtro semana")
d = data+str(diario["url"])
if(d in str(html).lower()):
diarioActual = diario["nombre"].lower()
return diarioActual
data = "<meta property=\"og:url\" content=\"https://"
data1 = "<meta property=\"og:url\" content=\"http://"
if(data in str(html).lower() or data1 in str(html).lower()):
for diario in diarios:
d = data+str(diario["url"])
d1 = data1+str(diario["url"])
if(d in str(html).lower() or d1 in str(html).lower()):
diarioActual = diario["nombre"].lower()
return diarioActual
else:
for diario in diarios:
url = str(diario["url"])
if("www." in url):
url = str(diario["url"])[4:len(url)]
if(url in str(html).lower()):
diarioActual = diario["nombre"].lower()
return diarioActual
return ""
    # list data
def find_file_process(self, titulo, empresa):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
files = db.empresas_file_process
query = {"empresa": empresa, "titulo": titulo}
data = files.find(query)
return data
def findAllDiario(self):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
diarios = db.diarios
return diarios.find()
def find_diario(self, diario):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
query = {"nombre": diario}
diario = db.diarios.find(query)
for d in diario:
return d
def findAllEmpresas(self):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
empresas = db.empresas
return empresas.find()
    # filters to clean data
def html_inicio(self, diario):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
query = {"nombre": diario}
diario = db.diarios.find(query)
for d in diario:
return str(d["inicio"])
def html_fin(self, diario):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
query = {"nombre": diario}
diario = db.diarios.find(query)
for d in diario:
return str(d["fin"])
def prueba(self):
self.mongos = "method prueba"
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
        # Insert test newspapers
diarios=[
{"url": "www.eltiempo.com", "nombre": "eltiempo", "inicio":"<div class=\"articulo-contenido\" itemprop=\"articleBody\">", "fin": "<div class=\"articulo-enlaces\""},
{"url": "www.elespectador.com", "nombre":"espectador", "inicio": '<div class="node-body content_nota field field--name-body field--type-text-with-summary field--label-hidden', "fin": "</div>"},
{"url": "www.dinero.com", "nombre":"dinero", "inicio": "<div id=\"contentItem\">", "fin": "</div>"},
{"url": "www.semana.com", "nombre":"semana", "inicio": "<!-- Alliance -->", "fin": "</div>"},
{"url": "sostenibilidad.semana.com", "nombre":"sostenibilidad", "inicio": "<!-- Alliance -->", "fin": "</div>"},
{"url": "www.larepublica.co", "nombre":"larepublica", "inicio": "<div class=\"lead\">", "fin": "<p> </p>"},
{"url": "www.portafolio.co", "nombre":"portafolio", "inicio": "<div class=\"article-content\" itemprop=\"articleBody\"", "fin": "<div class=\"article-bottom-ads\""},
{"url": "gerente.com/co", "nombre":"gerente", "inicio": "<div class=\"article-content\">", "fin": "</div>"}]
for d in diarios:
db.diarios.insert_one(d)
        # Insert information about the companies to search for
empresas = [
{'empresa': 'ECOPETROL', 'clave': ['ecopetrol', 'reficar']},
{'empresa': 'CANACOL ENERGY', 'clave': ['canacol', 'canacol energy']},
{'empresa': 'CEPSA', 'clave': ['cepsa', 'cepsa colombia']},
{'empresa': 'GENERAL', 'clave': ['fracking','gasoductos','petroleras']},
{'empresa': 'BPC', 'clave': ['british petroleum','british petroleum']}]
for d in empresas:
db.empresas.insert_one(d)
| python |
from floodsystem import stationdata
from floodsystem import station
def run():
stations = stationdata.build_station_list()
List = station.inconsistent_typical_range_stations(stations)
print(List)
print(f"Number of inconsistent stations: {len(List)}")
if __name__ == '__main__':
run() | python |
# IME 2022 - LabProg II
#
# Script just testing plotting on python
# This is not working properly :p
import seaborn as sns
df = sns.load_dataset('iris')
# Usual boxplot
ax = sns.boxplot(x='species', y='sepal_length', data=df)
# Add jitter with the swarmplot function.
ax = sns.swarmplot(x='species', y='sepal_length', data=df, color="grey")
| python |
from . import mixins # noqa
from . import generic # noqa
from . import formview # noqa
from . import detail # noqa
from . import uimock # noqa
| python |
from __future__ import print_function
import gdb
import socket
import pickle
import os
import subprocess as sp
import sys
IDA_HOST = '10.113.208.101'
PORT = 56746
TMPDIR = '/tmp/iddaa'
def connect_ida():
if not os.path.exists(TMPDIR):
os.mkdir(TMPDIR)
try:
sock = socket.create_connection((IDA_HOST, PORT), timeout=3)
return sock
except socket.error as err:
sys.stderr.write("[ERROR] {}\n".format(err))
return None
def show_result(result):
try:
f = open('{}/result'.format(TMPDIR), 'w')
f.write(result)
f.close()
    except IOError as err:
        sys.stderr.write("[ERROR] {}\n".format(err))
return
gdb.execute('shell vim {}/result'.format(TMPDIR))
def send(sock, buf):
if sys.version_info < (3, 0):
sock.send(buf)
else:
sock.send(bytes(buf, 'UTF-8'))
def recv(sock, raw=False):
buf = bytes()
while True:
tmp = sock.recv(4096)
buf += tmp
if not tmp:
break
if raw:
return buf
else:
return buf if sys.version_info < (3, 0) else buf.decode()
def get_ida_symbols():
sock = connect_ida()
if not sock: return
send(sock, 'GETSYM')
buf = recv(sock, True)
with open('{}/symfile'.format(TMPDIR), 'wb') as f:
f.write(buf)
if os.path.exists('{}/symfile'.format(TMPDIR)):
gdb.execute('symbol-file {}/symfile'.format(TMPDIR))
else:
print('Can\'t not receive ida symfile.')
def get_pseudo_code(func):
sock = connect_ida()
if not sock: return
send(sock, 'GETPSEUDOCODE {}'.format(func))
code = recv(sock).strip()
if 'Function not found' in code:
print('[Error] ' + code)
return
show_result(code)
def get_local_type():
sock = connect_ida()
if not sock: return
send(sock, 'GETLOCALTYPE')
buf = recv(sock, True)
local_type = pickle.loads(buf)
with open('{}/localtype.h'.format(TMPDIR), 'wb') as f:
f.write(bytes(local_type['header'], 'UTF-8'))
with open('{}/localtype.cpp'.format(TMPDIR), 'wb') as f:
f.write(bytes(local_type['source'], 'UTF-8'))
cwd = os.getcwd()
os.chdir(TMPDIR)
if sp.check_call('g++ -c -g localtype.cpp'.split(' ')) == 0:
gdb.execute('add-symbol-file {}/localtype.o 0'.format(TMPDIR))
else:
print('Generate symbol file failed')
os.chdir(cwd)
def get_breakpoints():
sock = connect_ida()
if not sock: return
send(sock, 'GETBREAKPOINTS')
buf = recv(sock, True)
bps = pickle.loads(buf)
print(bps)
for bp in bps:
gdb.execute('break *{}'.format(bp))
class IDAPYTHON(gdb.Command):
""" IDA python script wrapper"""
def __init__(self):
super(IDAPYTHON, self).__init__('idapython', gdb.COMMAND_USER)
def invoke(self, args, from_tty):
if args == 'cheatsheet':
self.__cheatsheet()
return
sock = connect_ida()
if not sock: return
send(sock, 'EXECFILE')
buf = ''
try:
f = open(args, 'r')
buf = f.read()
except:
print('[ERROR] File not found.')
return
send(sock, buf)
show_result(recv(sock))
def __cheatsheet(self):
print('IDA python Cheat Sheet')
print()
print('idc MakeComm(addr, comment)')
print('----------------------------------------')
print('Add comment at specified address.')
print('Ex: idc MakeComm(0x804ddaa, \'Soy Sauce\')')
print()
print('idc SetColor(addr, what, color)')
print('----------------------------------------')
print('Set color for specified area')
print('Ex: idc SetColor(0x0804ddaa, 1, 0xaabbcc) // address only')
print(' idc SetColor(0x0804ddaa, 2, 0xaabbcc) // entire function')
print(' idc SetColor(0x0804ddaa, 3, 0xaabbcc) // entire segment')
print()
class IDARPC(gdb.Command):
""" IDA python command wrapper"""
def __init__(self, name):
super(IDARPC, self).__init__(name, gdb.COMMAND_USER)
self.name = name
def invoke(self, args, from_tty):
sock = connect_ida()
if not sock: return
send(sock, 'EXEC {}.{}'.format(self.name, args))
show_result(recv(sock))
IDAPYTHON()
IDARPC('idautils')
IDARPC('idaapi')
IDARPC('idc')
| python |
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.profiler import profile, record_function, ProfilerActivity, schedule
import torch
import torch.cuda as cutorch
import numpy as np
import pandas as pd
import asyncio
import os
os.environ['TOKENIZERS_PARALLELISM'] = "false"
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from ecosys.utils.logger import Logger
from ecosys.utils.data_processor import processors, output_modes
from ecosys.utils.data_structure import HuggingFaceDataset
logger = Logger(__file__, "info", "w")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
feature_size = 768
sequence_length = 128
task_name = 'CoLA'
batch_size = 32
base_dir = "/home/oai/share"
tokenizer = AutoTokenizer.from_pretrained(f"{base_dir}/HuggingFace/bert-base-uncased")
model_keys = [
"Distil",
"Base",
"Large",
]
model_paths = [
f"{base_dir}/HuggingFace/distilbert-base-uncased",
f"{base_dir}/HuggingFace/bert-base-uncased",
f"{base_dir}/HuggingFace/bert-large-uncased",
]
model_paths = dict(zip(model_keys, model_paths))
models = dict()
for key in model_keys:
logger.debug("key %s, path %s", key, model_paths[key])
models[key] = AutoModelForSequenceClassification.from_pretrained(model_paths[key]).to(device)
models[key].eval()
# ------------- Dataset Prepare --------------
processor = processors[task_name.lower()]()
output_mode = output_modes[task_name.lower()]
def fill_mask(sentence):
words = sentence.split()
rnd_idx = np.random.randint(0,len(words))
words[rnd_idx] = "[MASK]"
return ' '.join(words)
texts = processor.get_train_tsv(f'/data/GlueData/{task_name}/').reset_index()
texts["sentence"] = texts["sentence"].apply(fill_mask)
encoded_texts = tokenizer(
texts["sentence"].to_list(),
padding = 'max_length',
truncation = True,
max_length=sequence_length,
return_tensors = 'pt'
)
dataset = HuggingFaceDataset(encoded_texts, torch.tensor(texts['label'].to_list()))
sampler = SequentialSampler(dataset)
logger.info("n_samples %s", len(dataset))
# performance_schedule = schedule(
# skip_first=10,
# wait=5,
# warmup=1,
# active=3,
# repeat=2
# )
import subprocess as sp
record = {
'bs': list(),
'key': list(),
'mem': list(),
'tol_t': list(),
'avg_t': list(),
}
def get_gpu_memory():
command = "nvidia-smi --query-gpu=memory.used --format=csv"
memory_used_info = sp.check_output(command.split()).decode('ascii').split('\n')[:-1][1:]
memory_used_values = [int(x.split()[0]) for i, x in enumerate(memory_used_info)]
# return np.sum(memory_used_values)
return memory_used_values[-1]
async def inference(key, input):
models[key](**input)
for key in model_keys:
with torch.no_grad():
for batch_size in [1, 2, 4, 8, 16 ,32, 64, 128, 256, 512]:
dataloader = DataLoader(
dataset, sampler=sampler, batch_size=batch_size
)
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
timings = []
starter.record()
loop = asyncio.new_event_loop()
tasks = [
inference(key, input) for input, _ in dataloader
]
loop.run_until_complete(asyncio.wait(tasks))
ender.record()
torch.cuda.synchronize()
loop.close()
# for input, _ in tqdm(dataloader, desc="Measuring"):
# models[key](**input)
curr_time = starter.elapsed_time(ender)
timings.append(curr_time)
# print(dir(cutorch.get_device_properties(device)))
# print(prof.key_averages())
record['bs'].append(batch_size)
record['key'].append(key)
record['mem'].append(get_gpu_memory())
record['tol_t'].append(np.sum(timings))
record['avg_t'].append(np.mean(timings))
logger.info(
"bs %s; key %s; Mem (MiB) %s; total time (ms) %s; avg time (ms) %s",
batch_size,
key,
get_gpu_memory(),
np.sum(timings),
np.mean(timings)
)
# logger.info("bs %s; key %s;\n\n %s \n\n ", batch_size, key, prof.key_averages().table(sort_by="cuda_time_total"))
df = pd.DataFrame(record)
df.to_csv(os.path.join(os.path.dirname(__file__), f"lm_throughput_{task_name}.csv")) | python |
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
import math
import matplotlib.animation as animation
import sys
# https://towardsdatascience.com/modelling-the-three-body-problem-in-classical-mechanics-using-python-9dc270ad7767
# https://evgenii.com/blog/two-body-problem-simulator/
animate = False
trail = False
previous = 0
def plotData(x1data,x2data,y1data,y2data, z1data, z2data):
global animate
fig = plt.figure()
ax = plt.axes(projection='3d')
if animate:
firstBodyTrail, = ax.plot(x1data, y1data, z1data,'blue',label="body1(t)")
secondBodyTrail, = ax.plot(x2data, y2data, z2data, '#f5a60a',label="body2(t)")
firstBody, = ax.plot(x1data, y1data, z1data,'blue', marker="o")
secondBody, = ax.plot(x2data, y2data, z2data, '#f5a60a',marker="o")
ax.legend()
def updateAnimation(num):
global previous, trail
if num<len(x1data):
firstBodyTrail.set_data(x1data[previous:num], y1data[previous:num])
firstBodyTrail.set_3d_properties(z1data[previous:num])
firstBody.set_data(x1data[num], y1data[num])
firstBody.set_3d_properties(z1data[num])
secondBodyTrail.set_data(x2data[previous:num], y2data[previous:num])
secondBodyTrail.set_3d_properties(z2data[previous:num])
secondBody.set_data(x2data[num], y2data[num])
secondBody.set_3d_properties(z2data[num])
# Trail
if trail:
if (num - previous)<260 and num > 250:
previous = previous + 1
#secondBody.set_color('#9944'+"%02x"%((0x55+num)%0xFF))
return firstBodyTrail, secondBodyTrail,
anim = animation.FuncAnimation(fig,updateAnimation, interval=1,blit=False)
else:
ax.scatter(x1data, y1data, z1data, label="x1(t)")
ax.scatter(x2data, y2data, z2data, label="x2(t)")
ax.legend()
plt.show()
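# calculateTrajectories recovers the individual body positions from the centre-of-mass
# position R and the relative separation vector r = x1 - x2 (the standard two-body
# reduction); a sketch of the formulas applied below, with M = m1 + m2:
#   x1 = R + (m2 / M) * r
#   x2 = R - (m1 / M) * r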
def calculateTrajectories(t, m1, m2, r, R):
# Data for a three-dimensional line
x1data = np.zeros((len(t)))
y1data = np.zeros((len(t)))
z1data = np.zeros((len(t)))
x2data = np.zeros((len(t)))
y2data = np.zeros((len(t)))
z2data = np.zeros((len(t)))
m1 = float(m1)
m2 = float(m2)
M = m1 + m2
for i in range(len(t)):
#print(r[i][0])
x1data[i] = float(R[i][0]) + m2/M * float(r[i][0])
y1data[i] = float(R[i][1]) + m2/M * float(r[i][1])
z1data[i] = float(R[i][2]) + m2/M * float(r[i][2])
x2data[i] = float(R[i][0]) - m1/M * float(r[i][0])
y2data[i] = float(R[i][1]) - m1/M * float(r[i][1])
z2data[i] = float(R[i][2]) - m1/M * float(r[i][2])
#print("%-4d %-10s %-10s %-10s %-10s %-10s %-10s"%(i, x1data[i], x2data[i], y1data[i], y2data[i], z1data[i], z2data[i]))
plotData(x1data,x2data,y1data,y2data,z1data,z2data)
if __name__ == "__main__":
print(sys.argv)
if len(sys.argv) == 2:
if sys.argv[1] == "-animate":
animate = True
elif sys.argv[1] == "-animatetrail":
animate = True
trail = True
f = open("data.out","r")
data = f.readlines()
f.close()
if data[0][0:2] == "m1" and data[1][0:2] == "m2" and data[2][0:1] == "t" and data[3][0:2] == "rx" and data[4][0:2] == "ry" and data[5][0:2] == "rz" and data[6][0:2] == "Rx" and data[7][0:2] == "Ry" and data[8][0:2] == "Rz":
m1 = data[0].split(" ")[2]
m2 = data[1].split(" ")[2]
t = data[2].split(" ")[2:]
rx = data[3].split(" ")[2:]
ry = data[4].split(" ")[2:]
rz = data[5].split(" ")[2:]
Rx = data[6].split(" ")[2:]
Ry = data[7].split(" ")[2:]
Rz = data[8].split(" ")[2:]
r = [list(a) for a in zip(rx,ry,rz)]
R = [list(a) for a in zip(Rx,Ry,Rz)]
calculateTrajectories(t, m1, m2, r, R)
elif data[0][0:2] == "m1" and data[1][0:2] == "m2" and data[2][0:1] == "t" and data[3][0:2] == "x1" and data[4][0:2] == "y1" and data[5][0:2] == "z1" and data[6][0:2] == "x2" and data[7][0:2] == "y2" and data[8][0:2] == "z2":
m1 = data[0].split(" ")[2]
m2 = data[1].split(" ")[2]
t = data[2].split(" ")[2:]
x1 = data[3].split(" ")[2:]
y1 = data[4].split(" ")[2:]
z1 = data[5].split(" ")[2:]
x2 = data[6].split(" ")[2:]
y2 = data[7].split(" ")[2:]
z2 = data[8].split(" ")[2:]
x1data = np.zeros((len(t)))
y1data = np.zeros((len(t)))
z1data = np.zeros((len(t)))
x2data = np.zeros((len(t)))
y2data = np.zeros((len(t)))
z2data = np.zeros((len(t)))
for idx in range(len(t)):
x1data[idx] = float(x1[idx])
y1data[idx] = float(y1[idx])
z1data[idx] = float(z1[idx])
x2data[idx] = float(x2[idx])
y2data[idx] = float(y2[idx])
z2data[idx] = float(z2[idx])
plotData(x1data,x2data,y1data,y2data,z1data,z2data)
| python |
__author__ = 'anthonymendoza'
from django.db.models import Q, QuerySet
from rest_framework.response import Response
from rest_framework import status
def dynamic_field_lookups(query_params):
Qr = None
    for filter_by, filter_value in query_params.items():
filter_by = "date__gte" if filter_by == "start_date" else filter_by
filter_by = "date__lte" if filter_by == "end_date" else filter_by
if filter_by == 'dam_id':
q = Q(**{"%s__iexact" % filter_by: filter_value})
else:
q = Q(**{"%s" % filter_by: filter_value})
if Qr:
Qr = Qr & q
else:
Qr = q
return Qr
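# Hypothetical usage from a DRF view (model name and params are illustrative only):
#   Qr = dynamic_field_lookups({'start_date': '2020-01-01', 'dam_id': 'abc'})
#   queryset = Reading.objects.filter(Qr) if Qr else Reading.objects.all()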
| python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Tabular Q-learning agent (notebook)
This notebook can be run directly from VSCode; to generate a
traditional Jupyter Notebook to open in your browser,
run the VSCode command `Export Current Python File As Jupyter Notebook`.
"""
# pylint: disable=invalid-name
# %%
import sys
import logging
from typing import cast
import gym
import numpy as np
import matplotlib.pyplot as plt
from cyberbattle.agents.baseline.learner import TrainedLearner
import cyberbattle.agents.baseline.plotting as p
import cyberbattle.agents.baseline.agent_wrapper as w
import cyberbattle.agents.baseline.agent_tabularqlearning as a
from cyberbattle.agents.baseline.agent_wrapper import Verbosity
import cyberbattle.agents.baseline.learner as learner
from cyberbattle._env.cyberbattle_env import AttackerGoal
logging.basicConfig(stream=sys.stdout, level=logging.ERROR, format="%(levelname)s: %(message)s")
# %%
# Benchmark parameters:
# Parameters from DeepDoubleQ paper
# - learning_rate = 0.00025
# - linear epsilon decay
# - gamma = 0.99
# Eliminated gamma_values
# 0.0,
# 0.0015, # too small
# 0.15, # too big
# 0.25, # too big
# 0.35, # too big
#
# NOTE: Given the relatively low number of training episodes (50),
# a high learning rate of .99 gives better results
# than a lower learning rate of 0.25 (i.e. maximal rewards are reached faster on average).
# Ideally we should decay the learning rate just like gamma and train over a
# much larger number of episodes
cyberbattlechain_10 = gym.make('CyberBattleChain-v0', size=10, attacker_goal=AttackerGoal(own_atleast_percent=1.0))
ep = w.EnvironmentBounds.of_identifiers(
maximum_node_count=12,
maximum_total_credentials=12,
identifiers=cyberbattlechain_10.identifiers
)
iteration_count = 9000
training_episode_count = 5
eval_episode_count = 5
gamma_sweep = [
0.015, # about right
]
def qlearning_run(gamma, gym_env):
"""Execute one run of the q-learning algorithm for the
specified gamma value"""
return learner.epsilon_greedy_search(
gym_env,
ep,
a.QTabularLearner(ep, gamma=gamma, learning_rate=0.90, exploit_percentile=100),
episode_count=training_episode_count,
iteration_count=iteration_count,
epsilon=0.90,
render=False,
epsilon_multdecay=0.75, # 0.999,
epsilon_minimum=0.01,
verbosity=Verbosity.Quiet,
title="Q-learning"
)
# %%
# Run Q-learning with gamma-sweep
qlearning_results = [qlearning_run(gamma, cyberbattlechain_10) for gamma in gamma_sweep]
qlearning_bestrun_10 = qlearning_results[0]
# %%
p.new_plot_loss()
for results in qlearning_results:
p.plot_all_episodes_loss(cast(a.QTabularLearner, results['learner']).loss_qsource.all_episodes, 'Q_source', results['title'])
p.plot_all_episodes_loss(cast(a.QTabularLearner, results['learner']).loss_qattack.all_episodes, 'Q_attack', results['title'])
plt.legend(loc="upper right")
plt.show()
# %% Plot episode length
p.plot_episodes_length(qlearning_results)
# %%
nolearning_results = learner.epsilon_greedy_search(
cyberbattlechain_10,
ep,
learner=a.QTabularLearner(ep, trained=qlearning_bestrun_10['learner'],
gamma=0.0, learning_rate=0.0, exploit_percentile=100),
episode_count=eval_episode_count,
iteration_count=iteration_count,
epsilon=0.30, # 0.35,
render=False,
title="Exploiting Q-matrix",
verbosity=Verbosity.Quiet
)
# %%
randomlearning_results = learner.epsilon_greedy_search(
cyberbattlechain_10,
ep,
learner=a.QTabularLearner(ep, trained=qlearning_bestrun_10['learner'],
gamma=0.0, learning_rate=0.0, exploit_percentile=100),
episode_count=eval_episode_count,
iteration_count=iteration_count,
epsilon=1.0, # purely random
render=False,
verbosity=Verbosity.Quiet,
title="Random search"
)
# %%
# Plot averaged cumulative rewards for Q-learning vs Random vs Q-Exploit
all_runs = [*qlearning_results,
randomlearning_results,
nolearning_results
]
Q_source_10 = cast(a.QTabularLearner, qlearning_bestrun_10['learner']).qsource
Q_attack_10 = cast(a.QTabularLearner, qlearning_bestrun_10['learner']).qattack
p.plot_averaged_cummulative_rewards(
all_runs=all_runs,
title=f'Benchmark -- max_nodes={ep.maximum_node_count}, episodes={eval_episode_count},\n'
f'dimension={Q_source_10.state_space.flat_size()}x{Q_source_10.action_space.flat_size()}, '
f'{Q_attack_10.state_space.flat_size()}x{Q_attack_10.action_space.flat_size()}\n'
f'Q1={[f.name() for f in Q_source_10.state_space.feature_selection]} '
f'-> {[f.name() for f in Q_source_10.action_space.feature_selection]})\n'
f"Q2={[f.name() for f in Q_attack_10.state_space.feature_selection]} -> 'action'")
# %%
# plot cumulative rewards for all episodes
p.plot_all_episodes(qlearning_results[0])
# %%
# Plot the Q-matrices
# %%
# Print non-zero coordinate in the Q matrix Q_source
i = np.where(Q_source_10.qm)
q = Q_source_10.qm[i]
list(zip(np.array([Q_source_10.state_space.pretty_print(i) for i in i[0]]),
np.array([Q_source_10.action_space.pretty_print(i) for i in i[1]]), q))
# %%
# Print non-zero coordinate in the Q matrix Q_attack
i2 = np.where(Q_attack_10.qm)
q2 = Q_attack_10.qm[i2]
list(zip([Q_attack_10.state_space.pretty_print(i) for i in i2[0]],
[Q_attack_10.action_space.pretty_print(i) for i in i2[1]], q2))
##################################################
# %% [markdown]
# ## Transfer learning from size 4 to size 10
# Exploiting Q-matrix learned from a different network.
# %%
# Train Q-matrix on CyberBattle network of size 4
cyberbattlechain_4 = gym.make('CyberBattleChain-v0', size=4,
attacker_goal=AttackerGoal(own_atleast_percent=1.0)
)
qlearning_bestrun_4 = qlearning_run(0.015, gym_env=cyberbattlechain_4)
def stop_learning(trained_learner):
return TrainedLearner(
learner=a.QTabularLearner(
ep,
gamma=0.0,
learning_rate=0.0,
exploit_percentile=0,
trained=trained_learner['learner']
),
title=trained_learner['title'],
trained_on=trained_learner['trained_on'],
all_episodes_rewards=trained_learner['all_episodes_rewards'],
all_episodes_availability=trained_learner['all_episodes_availability']
)
learner.transfer_learning_evaluation(
environment_properties=ep,
trained_learner=stop_learning(qlearning_bestrun_4),
eval_env=cyberbattlechain_10,
eval_epsilon=0.5, # alternate with exploration to help generalization to bigger network
eval_episode_count=eval_episode_count,
iteration_count=iteration_count
)
learner.transfer_learning_evaluation(
environment_properties=ep,
trained_learner=stop_learning(qlearning_bestrun_10),
eval_env=cyberbattlechain_4,
eval_epsilon=0.5,
eval_episode_count=eval_episode_count,
iteration_count=iteration_count
)
# %%
| python |
# The init module for all CRUD in bash
import uuid
import re
from datetime import datetime
from app.model.Bash import Bash
from random import randint
from app.utils.helpers import (
md5,
dell,
get_trace,
gen_hash,
check_password,
generate_key
)
from app.utils.save_bash import save_bash
from app.utils.get_bash import (
get_bash,
get_all_publics_bash,
get_all_private_bash,
get_content_by_key,
find_b4sh,
count_all
)
from app.utils.update_bash import (
update_bash,
up_vote,
down_vote
)
from app.utils.delete_bash import delete_bash
# Example of a valid bash object
# {
# "bash_id": "1234",
# "key": "123:sad",
# "hash": "sadoisankjcn2798382hnkjsacndskjcndsccdsc",
# "title": "A simple echo",
# "author": "d4rk3r",
# "description": "This is a test of the echo command",
# "content": "echo 'test'",
# "stats": {
# "used_count": 3,
# "updated_count": 1,
# "up_vote": 17,
# "down_vote": 3,
# },
# "history": [],
# "date": "2020-04-11 04:47:09"
# }
# for some long commands, we can save it on termbin
# curl -d "username=mkyong&password=abc" termbin.com:9999 --output -
| python |
import csv
from clint.textui import progress
from django.core.management.base import BaseCommand
from shapes.models import MaterialShape
from bsdfs.models import ShapeBsdfLabel_wd
class Command(BaseCommand):
args = ''
help = 'Helper to export CSV data'
def handle(self, *args, **options):
print 'Fetching data...'
qset = MaterialShape.objects.filter(
correct=True,
bsdf_wd__color_correct=True,
bsdf_wd__gloss_correct=True,
bsdf_wd__init_method='KR',
photo__scene_category_correct_score__gt=0,
)
shapes = qset.values_list(
'id',
'photo__scene_category__name',
'photo__scene_category_correct_score',
'substance__name',
'name__name',
'planar',
'bsdf_wd',
)
bsdfs = ShapeBsdfLabel_wd.objects.in_bulk(
qset.values_list('bsdf_wd', flat=True)
)
filename = args[0] if len(args) >= 1 else 'out.csv'
print 'Writing data to %s...' % filename
with open(filename, 'wb') as f:
writer = csv.writer(f)
writer.writerow([
'shape_id',
'scene',
'scene_correct_score',
'material_name',
'object_name',
'planar',
'bsdf_wd_id',
'rho_d_r',
'rho_d_g',
'rho_d_b',
'rho_s_r',
'rho_s_g',
'rho_s_b',
'alpha',
'colored_reflection',
'color_correct_score',
'gloss_correct_score',
])
for shape in progress.bar(shapes):
b = bsdfs[shape[6]]
rho = b.rho()
writer.writerow(
list(shape) +
list(rho[0]) +
list(rho[1]) +
[b.alpha(), b.metallic, b.color_correct_score, b.gloss_correct_score]
)
| python |
import os
import shutil
import typing
from ConfigSpaceNNI import ConfigurationSpace
from smac.configspace import pcs_new as pcs
class OutputWriter(object):
"""Writing scenario to file."""
def __init__(self):
pass
def write_scenario_file(self, scenario):
"""Write scenario to a file (format is compatible with input_reader).
Will overwrite if file exists. If you have arguments that need special
parsing when saving, specify so in the _parse_argument-function.
        Creates output-dir if necessary.
Parameters
----------
scenario: Scenario
Scenario to be written to file
Returns
-------
status: False or None
False indicates that writing process failed
"""
if scenario.output_dir_for_this_run is None or scenario.output_dir_for_this_run == "":
scenario.logger.info("No output directory for scenario logging "
"specified -- scenario will not be logged.")
return False
# Create output-dir if necessary
if not os.path.isdir(scenario.output_dir_for_this_run):
scenario.logger.debug("Output directory does not exist! Will be "
"created.")
try:
os.makedirs(scenario.output_dir_for_this_run)
except OSError:
raise OSError("Could not make output directory: "
"{}.".format(scenario.output_dir_for_this_run))
# options_dest2name maps scenario._arguments from dest -> name
options_dest2name = {(scenario._arguments[v]['dest'] if
scenario._arguments[v]['dest'] else v) : v for v in scenario._arguments}
# Write all options into "output_dir/scenario.txt"
path = os.path.join(scenario.output_dir_for_this_run, "scenario.txt")
scenario.logger.debug("Writing scenario-file to {}.".format(path))
with open(path, 'w') as fh:
for key in options_dest2name:
new_value = self._parse_argument(scenario, key, getattr(scenario, key))
if new_value is not None:
fh.write("{} = {}\n".format(options_dest2name[key], new_value))
def _parse_argument(self, scenario, key: str, value):
"""Some values of the scenario-file need to be changed upon writing,
such as the 'ta' (target algorithm), due to it's callback. Also,
the configspace, features, train_inst- and test-inst-lists are saved
to output_dir, if they exist.
Parameters:
-----------
scenario: Scenario
Scenario-file to be written
key: string
Name of the attribute in scenario-file
value: Any
Corresponding attribute
Returns:
--------
new value: string
The altered value, to be written to file
        Side effects:
------------
- copies files pcs_fn, train_inst_fn, test_inst_fn and feature_fn to
output if possible, creates the files from attributes otherwise
"""
if key in ['pcs_fn', 'train_inst_fn', 'test_inst_fn', 'feature_fn']:
# Copy if file exists, else write to new file
if value is not None and os.path.isfile(value):
try:
return shutil.copy(value, scenario.output_dir_for_this_run)
except shutil.SameFileError:
return value # File is already in output_dir
elif key == 'pcs_fn' and scenario.cs is not None:
new_path = os.path.join(scenario.output_dir_for_this_run, "configspace.pcs")
self.write_pcs_file(scenario.cs, new_path)
elif key == 'train_inst_fn' and scenario.train_insts != [None]:
new_path = os.path.join(scenario.output_dir_for_this_run, 'train_insts.txt')
self.write_inst_file(scenario.train_insts, new_path)
elif key == 'test_inst_fn' and scenario.test_insts != [None]:
new_path = os.path.join(scenario.output_dir_for_this_run, 'test_insts.txt')
self.write_inst_file(scenario.test_insts, new_path)
elif key == 'feature_fn' and scenario.feature_dict != {}:
new_path = os.path.join(scenario.output_dir_for_this_run, 'features.txt')
self.write_inst_features_file(scenario.n_features,
scenario.feature_dict, new_path)
else:
return None
# New value -> new path
return new_path
elif key == 'ta' and value is not None:
# Reversing the callback on 'ta' (shlex.split)
return " ".join(value)
elif key in ['train_insts', 'test_insts', 'cs', 'feature_dict']:
# No need to log, recreated from files
return None
else:
return value
def write_inst_file(self, insts: typing.List[str], fn: str):
"""Writes instance-list to file.
Parameters
----------
insts: list<string>
Instance list to be written
fn: string
Output path
"""
with open(fn, 'w') as fh:
fh.write("\n".join(insts))
def write_inst_features_file(self, n_features: int, feat_dict, fn: str):
"""Writes features to file.
Parameters
----------
n_features: int
Number of features
feat_dict: dict
Features to be written
fn: string
File name of instance feature file
"""
header = "Instance, " + ", ".join(
["feature"+str(i) for i in range(n_features)]) + "\n"
body = [", ".join([inst] + [str(f) for f in feat_dict[inst]]) + "\n"
for inst in feat_dict]
with open(fn, 'w') as fh:
fh.write(header + "".join(body))
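        # Illustrative output for n_features=2 and feat_dict={'inst1': [0.1, 0.2]}:
        #   Instance, feature0, feature1
        #   inst1, 0.1, 0.2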
def write_pcs_file(self, cs: ConfigurationSpace, fn: str):
"""Writing ConfigSpace to file.
Parameters
----------
cs: ConfigurationSpace
Config-space to be written
fn: string
Output-file-path
"""
with open(fn, 'w') as fh:
fh.write(pcs.write(cs))
| python |
# Generated by Django 3.2.8 on 2022-01-17 16:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cause',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cause_id', models.TextField(max_length=200, verbose_name='Cause ID')),
('label', models.TextField(max_length=200, verbose_name='Cause Label')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Cause Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Cause Updated')),
('tickets', models.IntegerField(blank=True, default=0, null=True, verbose_name='Ticket Count')),
],
),
]
| python |
import os
import glob
import pandas as pd
flag = True
results = pd.DataFrame()
for counter, current_file in enumerate(glob.glob("*.CSV")):
namedf = pd.read_csv(current_file, header=None, sep=";")
# print(namedf)
results = pd.concat([results, namedf])
results.to_csv('Combined.csv', index=None, sep=",")
# extension = 'CSV'
# all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
# #combine all files in the list
# combined_csv = pd.concat([pd.read_csv(f, sep=';') for f in all_filenames ])
# #export to csv
# print(combined_csv.head())
# # combined_csv.to_csv( "combined_raw.csv", index=False, encoding='utf-8-sig') | python |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from . import views
urlpatterns = [
url(r'^record_history/(?P<account_id>\d+)/$', login_required(views.RecordHistoryView.as_view()), name = 'record_history'),
url(r'^account_list/(?P<trade_type>\w+)/$', login_required(views.AccountListView.as_view()), name = 'account_list'),
url(r'^account_history/(?P<account_id>\d+)/$', login_required(views.AccountHistoryView.as_view()), name = 'account_history'),
url(r'^account_history/(?P<trade_type>\w+)/$', login_required(views.AccountHistoryView.as_view()), name = 'account_histories'),
url(r'^rebalance_list/$', login_required(views.RebalanceListView.as_view()), name = 'rebalance_list'),
url(r'^rebalance_history/(?P<pair_id>\d+)/$', login_required(views.RebalanceHistoryView.as_view()), name = 'rebalance_history'),
url(r'^rebalance_history/$', login_required(views.RebalanceHistoryView.as_view()), name = 'rebalance_histories'),
] | python |
a = input('Type something: ')
print('Is it lowercase?', a.islower())
print('Is it uppercase?', a.isupper())
print('Is it a number?', a.isnumeric())
print('Is it a letter?', a.isalpha())
| python |
from gym_brt.envs.reinforcementlearning_extensions.rl_reward_functions import (
swing_up_reward,
balance_reward
)
from gym_brt.envs.qube_balance_env import (
QubeBalanceEnv,
)
from gym_brt.envs.qube_swingup_env import (
QubeSwingupEnv,
)
from gym_brt.envs.reinforcementlearning_extensions.rl_gym_classes import (
QubeBeginUpEnv,
QubeBeginDownEnv,
RandomStartEnv,
NoisyEnv,
convert_state,
convert_state_back
)
from gym.envs.registration import register
register(
id='QubeBeginDownEnv-v1',
entry_point='gym_brt.envs:QubeBeginDownEnv',
)
register(
id='QubeSwingupEnv-v1',
entry_point='gym_brt.envs:QubeSwingupEnv',
)
register(
id='QubeBeginUpEnv-v1',
entry_point='gym_brt.envs:QubeBeginUpEnv',
)
register(
id='QubeNoisyEnv-v1',
entry_point='gym_brt.envs:NoisyEnv',
)
register(
id='QubeRandomStartEnv-v1',
entry_point='gym_brt.envs:RandomStartEnv',
) | python |
#!/usr/bin/env python
# coding=UTF-8
#The first line allows this script to be executable
import os
import sys
import operator
from termcolor import colored
def boost_mode():
    print colored('Warning: some features may only be available on Titan-series GPUs; nvidia-smi will tell you which ones are supported','red',attrs=['bold'])
gpu_clock = str(raw_input("Enter your maximum GPU clock in mhz (e.g. 1124): "))
mem_clock = str(raw_input("Enter your maximum memory clock in mhz (e.g. 960): "))
os.system('nvidia-smi -pm 1')
os.system('nvidia-smi -e 1')
cmd_String = 'nvidia-smi -ac %s,%s' % (mem_clock,gpu_clock)
os.system(cmd_String)
os.system('nvidia-smi --auto-boost-permission=0')
os.system('nvidia-smi --auto-boost-default=1')
    print colored('[*] Clock set to %s mhz GPU, %s mhz memory' % (gpu_clock, mem_clock),'yellow',attrs=['bold'])
main()
return
def monitor_systems():
cmd_String = "gnome-terminal -e 'bash -c \"nvidia-smi dmon; exec bash\"'"
os.system(cmd_String)
cmd_String = "gnome-terminal -e 'bash -c \"nvidia-smi stats; exec bash\"'"
os.system(cmd_String)
print colored('[*] All monitoring modes enabled','yellow',attrs=['bold'])
return
def main():
print colored('MAIN MENU','cyan',attrs=['bold'])
opt_List = [
'\n\t#0. Exit Program',
'#1. Set my video card to full constant-boost mode',
'#2. Activate all monitoring systems'
]
print ("\n\t".join(opt_List))
    opt_Choice = str(raw_input("Enter an OPTION: "))
if opt_Choice == "0":
exit(0)
elif opt_Choice == "1":
os.system('clear')
boost_mode()
main()
elif opt_Choice == "2":
os.system('clear')
monitor_systems()
main()
main()
| python |
import torch
import numpy as np
import argparse
import os
import glob
from tqdm import tqdm
from collections import namedtuple
import sys
sys.path.append('../core')
from oan import OANet
from io_util import read_keypoints, read_descriptors, write_matches
class NNMatcher(object):
    """Mutual nearest-neighbour descriptor matcher with Lowe ratio test."""
    def __init__(self):
super(NNMatcher, self).__init__()
def run(self, nkpts, descs):
# pts1, pts2: N*2 GPU torch tensor
# desc1, desc2: N*C GPU torch tensor
# corr: N*4
# sides: N*2
# corr_idx: N*2
pts1, pts2, desc1, desc2 = nkpts[0], nkpts[1], descs[0], descs[1]
d1, d2 = (desc1**2).sum(1), (desc2**2).sum(1)
distmat = (d1.unsqueeze(1) + d2.unsqueeze(0) - 2*torch.matmul(desc1, desc2.transpose(0,1))).sqrt()
dist_vals, nn_idx1 = torch.topk(distmat, k=2, dim=1, largest=False)
nn_idx1 = nn_idx1[:,0]
_, nn_idx2 = torch.topk(distmat, k=1, dim=0, largest=False)
nn_idx2= nn_idx2.squeeze()
mutual_nearest = (nn_idx2[nn_idx1] == torch.arange(nn_idx1.shape[0]).cuda())
ratio_test = dist_vals[:,0] / dist_vals[:,1].clamp(min=1e-15)
pts2_match = pts2[nn_idx1, :]
corr = torch.cat([pts1, pts2_match], dim=-1)
corr_idx = torch.cat([torch.arange(nn_idx1.shape[0]).unsqueeze(-1), nn_idx1.unsqueeze(-1).cpu()], dim=-1)
sides = torch.cat([ratio_test.unsqueeze(1), mutual_nearest.float().unsqueeze(1)], dim=1)
return corr, sides, corr_idx
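    # In short, run() computes the full pairwise descriptor distance matrix, takes each
    # point's two nearest neighbours in the other image (for the Lowe-style ratio test),
    # and flags correspondences that are mutual nearest neighbours; both signals are
    # returned as "sides" for downstream filtering.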
def infer(self, kpt_list, desc_list):
nkpts = [torch.from_numpy(i[:,:2].astype(np.float32)).cuda() for i in kpt_list]
descs = [torch.from_numpy(desc.astype(np.float32)).cuda() for desc in desc_list]
corr, sides, corr_idx = self.run(nkpts, descs)
inlier_idx = np.where(sides[:,1].cpu().numpy())
matches = corr_idx[inlier_idx[0], :].numpy().astype('int32')
corr0 = kpt_list[0][matches[:, 0]]
corr1 = kpt_list[1][matches[:, 1]]
return matches, corr0, corr1
class LearnedMatcher(object):
def __init__(self, model_path, inlier_threshold=0, use_ratio=2, use_mutual=2):
self.default_config = {}
self.default_config['net_channels'] = 128
self.default_config['net_depth'] = 12
self.default_config['clusters'] = 500
self.default_config['use_ratio'] = use_ratio
self.default_config['use_mutual'] = use_mutual
self.default_config['iter_num'] = 1
self.default_config['inlier_threshold'] = inlier_threshold
self.default_config = namedtuple("Config", self.default_config.keys())(*self.default_config.values())
self.model = OANet(self.default_config)
print('load model from ' +model_path)
checkpoint = torch.load(model_path)
self.model.load_state_dict(checkpoint['state_dict'])
self.model.cuda()
self.model.eval()
self.nn_matcher = NNMatcher()
def normalize_kpts(self, kpts):
x_mean = np.mean(kpts, axis=0)
dist = kpts - x_mean
meandist = np.sqrt((dist**2).sum(axis=1)).mean()
scale = np.sqrt(2) / meandist
T = np.zeros([3,3])
T[0,0], T[1,1], T[2,2] = scale, scale, 1
T[0,2], T[1,2] = -scale*x_mean[0], -scale*x_mean[1]
nkpts = kpts * np.asarray([T[0, 0], T[1, 1]]) + np.array([T[0, 2], T[1, 2]])
return nkpts
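    # normalize_kpts applies an isotropic (Hartley-style) normalization: keypoints are
    # centred on their mean and rescaled so the average distance from the origin is sqrt(2).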
def infer(self, kpt_list, desc_list):
with torch.no_grad():
nkpts = [torch.from_numpy(self.normalize_kpts(i[:,:2]).astype(np.float32)).cuda() for i in kpt_list]
descs = [torch.from_numpy(desc.astype(np.float32)).cuda() for desc in desc_list]
corr, sides, corr_idx = self.nn_matcher.run(nkpts, descs)
corr, sides = corr.unsqueeze(0).unsqueeze(0), sides.unsqueeze(0)
data = {}
data['xs'] = corr
            # currently supported combinations of use_ratio / use_mutual:
if self.default_config.use_ratio==2 and self.default_config.use_mutual==2:
data['sides'] = sides
elif self.default_config.use_ratio==0 and self.default_config.use_mutual==1:
mutual = sides[0,:,1]>0
data['xs'] = corr[:,:,mutual,:]
data['sides'] = []
corr_idx = corr_idx[mutual,:]
elif self.default_config.use_ratio==1 and self.default_config.use_mutual==0:
ratio = sides[0,:,0] < 0.8
data['xs'] = corr[:,:,ratio,:]
data['sides'] = []
corr_idx = corr_idx[ratio,:]
elif self.default_config.use_ratio==1 and self.default_config.use_mutual==1:
mask = (sides[0,:,0] < 0.8) & (sides[0,:,1]>0)
data['xs'] = corr[:,:,mask,:]
data['sides'] = []
corr_idx = corr_idx[mask,:]
elif self.default_config.use_ratio==0 and self.default_config.use_mutual==0:
data['sides'] = []
else:
raise NotImplementedError
y_hat, e_hat = self.model(data)
y = y_hat[-1][0, :].cpu().numpy()
inlier_idx = np.where(y > self.default_config.inlier_threshold)
matches = corr_idx[inlier_idx[0], :].numpy().astype('int32')
corr0 = kpt_list[0][matches[:, 0]]
corr1 = kpt_list[1][matches[:, 1]]
return matches, corr0, corr1
def str2bool(v):
return v.lower() in ("true", "1")
# Parse command line arguments.
parser = argparse.ArgumentParser(description='Match keypoints between image pairs with a NN or learned (OANet) matcher.')
parser.add_argument('--input_path', type=str, default='/home/liao/zjh/datasets/',
help='Image directory or movie file or "camera" (for webcam).')
parser.add_argument('--seqs', type=str, default='Fountain',
help='split by .')
parser.add_argument('--img_glob', type=str, default='*',
                    help='Glob match if directory of images is specified (default: \'*\').')
parser.add_argument('--input_suffix', type=str, default='sift-8000',
                    help='suffix of filename.')
parser.add_argument('--output_suffix', type=str, default='sift-8000-our',
                    help='suffix of filename.')
parser.add_argument('--use_prev_pairs', type=str2bool, default=False,
help='use previous image pairs')
parser.add_argument('--prev_output_suffix', type=str, default='sift-8000',
help='previous image pairs suffix')
parser.add_argument('--inlier_threshold', type=float, default=0,
help='inlier threshold. default: 0')
parser.add_argument('--use_learned_matcher', type=str2bool, default=True,
                    help='True: learned matcher, False: NN matcher')
parser.add_argument('--use_mutual', type=int, default=2,
help='0: not use mutual. 1: use mutual before learned matcher. 2: use mutual as side information')
parser.add_argument('--use_ratio', type=int, default=2,
help='0: not use ratio test. 1: use ratio test before learned matcher. 2: use ratio test as side information')
def dump_match(matcher, img1_name, img2_name, base_dir, input_suffix, output_suffix):
kpt1_name = os.path.join(base_dir, 'keypoints', img1_name+'.'+input_suffix+'.bin')
kpt2_name = os.path.join(base_dir, 'keypoints', img2_name+'.'+input_suffix+'.bin')
desc1_name = os.path.join(base_dir, 'descriptors', img1_name+'.'+input_suffix+'.bin')
desc2_name = os.path.join(base_dir, 'descriptors', img2_name+'.'+input_suffix+'.bin')
kpt1, kpt2 = read_keypoints(kpt1_name), read_keypoints(kpt2_name)
desc1, desc2 = read_descriptors(desc1_name), read_descriptors(desc2_name)
match_name = img1_name+'---'+img2_name+'.'+output_suffix+'.bin'
match_name = os.path.join(base_dir, 'matches', match_name)
matches, _, _ = matcher.infer([kpt1, kpt2], [desc1, desc2])
write_matches(match_name, matches)
if __name__ == "__main__":
opt = parser.parse_args()
seqs = opt.seqs.split('.')
if not opt.use_learned_matcher:
matcher = NNMatcher()
else:
if opt.use_ratio < 2 and opt.use_mutual < 2:
model_path = os.path.join('../model', 'sift-8k/model_best.pth')
matcher = LearnedMatcher(model_path, opt.inlier_threshold, use_ratio=opt.use_ratio, use_mutual=opt.use_mutual)
elif opt.use_ratio == 2 and opt.use_mutual == 2:
model_path = os.path.join('../model', 'sift-side-8k/model_best.pth')
matcher = LearnedMatcher(model_path, opt.inlier_threshold, use_ratio=2, use_mutual=2)
else:
raise NotImplementedError
for seq in seqs:
if not os.path.exists(opt.input_path+seq+'/matches'):
os.system('mkdir '+opt.input_path+seq+'/matches')
if not opt.use_prev_pairs:
# get image lists
search = os.path.join(opt.input_path, seq, 'images', opt.img_glob)
listing = glob.glob(search)
listing.sort()
pairs = []
for img1 in range(len(listing)):
for img2 in range(len(listing))[img1+1:]:
img1_name, img2_name = listing[img1].split('/')[-1], listing[img2].split('/')[-1]
pairs += [[img1_name, img2_name]]
else:
search = os.path.join(opt.input_path, seq, 'matches', "*---*."+opt.prev_output_suffix+'.bin')
listing = glob.glob(search)
pairs = [os.path.basename(path[:-5-len(opt.prev_output_suffix)]).split("---") for path in listing]
for pair in tqdm(pairs):
img1_name, img2_name = pair[0], pair[1]
dump_match(matcher, img1_name, img2_name, os.path.join(opt.input_path, seq), opt.input_suffix, opt.output_suffix)
| python |
class WrongState(Exception):
def __init__(self, value, sessionState=None):
self.value = value
self.state = sessionState
def __str__(self):
return repr(self.value)
| python |
from django.shortcuts import render
from .models import Chat
from .serializers import ChatSerializer
from rest_framework import viewsets
# Create your views here.
class ChatViewSet(viewsets.ModelViewSet):
serializer_class = ChatSerializer
queryset = Chat.objects.all() | python |
import os
import shutil
import requests
import zipfile
import bz2
import tarfile
from splendor.home import get_splendor_home
from splendor.assets import install_assets
from splendor.download import download, agree_to_zip_licenses
import ltron.settings as settings
from ltron.home import get_ltron_home, make_ltron_home
from ltron.license import ldcad_license_text
ltron_home = get_ltron_home()
def install_ldraw(overwrite=False):
print('='*80)
print('Installing LDraw')
make_ltron_home()
print('-'*80)
complete_zip_path = os.path.join(ltron_home, 'complete.zip')
downloaded_path = download(
settings.urls['ldraw'],
complete_zip_path,
overwrite=overwrite,
)
print('-'*80)
print('Checking for Licenses')
if agree_to_zip_licenses(complete_zip_path):
print('Extracting Contents To: %s'%ltron_home)
with zipfile.ZipFile(complete_zip_path, 'r') as z:
z.extractall(ltron_home)
else:
print('Must agree to all licensing. Aborting LDraw install.')
def ldcad_license_agreement():
print('LDCad is a necessary component of LTRON '
'and is provided under the following license:')
print(ldcad_license_text)
print('Agree? (y/n)')
yn = input()
return yn in 'yY'
def install_ldcad(overwrite=True):
print('='*80)
print('Installing LDCad')
make_ltron_home()
print('-'*80)
# download
ldcad_url = settings.urls['ldcad']
ldcad_bz2_filename = ldcad_url.split('/')[-1]
ldcad_bz2_path = os.path.join(ltron_home, ldcad_bz2_filename)
download(ldcad_url, ldcad_bz2_path, overwrite=overwrite)
print('-'*80)
if not ldcad_license_agreement():
        print('Must agree to all licensing. Aborting LDCad install.')
return False
# unbz2
ldcad_tar_path = ldcad_bz2_path.replace('.bz2', '')
print('-'*80)
print('Extracting bz2 archive to: %s'%ldcad_tar_path)
with open(ldcad_bz2_path, 'rb') as f_in:
data = bz2.decompress(f_in.read())
with open(ldcad_tar_path, 'wb') as f_out:
f_out.write(data)
# untar
ldcad_path = ldcad_tar_path.replace('.tar', '')
print('-'*80)
print('Extracting tar archive to: %s'%ldcad_path)
with tarfile.open(ldcad_tar_path, 'r:') as f:
f.extractall(ltron_home)
# unzip shadow
print('-'*80)
print('Unzipping shadow')
shadow_seed_path = os.path.join(ldcad_path, 'seeds', 'shadow.sf')
ldcad_shadow_path = os.path.join(ldcad_path, 'shadow')
if not os.path.exists(ldcad_shadow_path):
os.makedirs(ldcad_shadow_path)
with zipfile.ZipFile(shadow_seed_path, 'r') as z:
z.extractall(ldcad_shadow_path)
# unzip offLib
print('-'*80)
print('Unzipping offLibShadow')
ldcad_offlibshadow_csl_path = os.path.join(
ldcad_shadow_path, 'offLib', 'offLibShadow.csl')
ldcad_offlibshadow_path = os.path.join(
ldcad_shadow_path, 'offLib', 'offLibShadow')
if not os.path.exists(ldcad_offlibshadow_path):
os.makedirs(ldcad_offlibshadow_path)
with zipfile.ZipFile(ldcad_offlibshadow_csl_path, 'r') as z:
z.extractall(ldcad_offlibshadow_path)
def install_collection(name, overwrite=False):
print('='*80)
print('Installing %s Data Collection'%name)
print('-'*80)
zip_path = os.path.join(settings.paths['collections'], '%s.zip'%name)
download(settings.urls[name], zip_path, overwrite=overwrite)
print('-'*80)
print('Extracting collection %s'%name)
extract_path = os.path.join(settings.paths['collections'], name)
if not os.path.exists(extract_path) or overwrite:
with zipfile.ZipFile(zip_path, 'r') as z:
z.extractall(settings.paths['collections'])
else:
print('Already extracted.')
def install_splendor_meshes(resolution, overwrite=False):
print('='*80)
print('Installing Splendor Meshes (%s)'%resolution)
print('-'*80)
asset_name = 'ltron_assets_%s'%resolution
install_assets(settings.urls[asset_name], asset_name, overwrite=overwrite)
splendor_home = get_splendor_home()
resolution_path = os.path.join(splendor_home, asset_name)
resolution_cfg_path = resolution_path + '.cfg'
generic_cfg_path = os.path.join(splendor_home, 'ltron_assets.cfg')
if os.path.exists(generic_cfg_path):
os.unlink(generic_cfg_path)
os.symlink(resolution_cfg_path, generic_cfg_path)
#generic_path = os.path.join(splendor_home, 'ltron_assets')
#if os.path.exists(generic_path):
# os.unlink(generic_path)
#os.symlink(resolution_path, generic_path)
default_settings_cfg = '''
[DEFAULT]
datasets = {HOME}/datasets
collections = {HOME}/collections
[paths]
ldraw = {HOME}/ldraw
ldcad = {HOME}/LDCad-1-6d-Linux
shadow = %(ldcad)s/shadow
shadow_ldraw = %(shadow)s/offLib/offLibShadow
[datasets]
random_six = %(collections)s/random_six/random_six.json
#snap_one = %(collections)s/snap_one/snap_one.json
#snap_one_frames = %(collections)s/snap_one/snap_one_frames.json
#snap_four = %(collections)s/snap_four/snap_four.json
#snap_four_frames = %(collections)s/snap_four/snap_four_frames.json
#conditional_snap_two = %(collections)s/conditional_snap_two/conditional_snap_two.json
#conditional_snap_two_frames = %(collections)s/conditional_snap_two/conditional_snap_two_frames.json
[collections]
omr = %(collections)s/omr
random_six = %(collections)s/random_six
#snap_one = %(collections)s/snap_one
#snap_four = %(collections)s/snap_four
#conditional_snap_two = %(collections)s/conditional_snap_two
[urls]
ltron = https://github.com/aaronwalsman/ltron
ldraw = http://www.ldraw.org/library/updates/complete.zip
ldcad = http://www.melkert.net/action/download/LDCad-1-6d-Linux.tar.bz2
ldcad_home = http://www.melkert.net/LDCad
omr_ldraw = https://omr.ldraw.org
omr = https://drive.google.com/uc?id=1nr3uut3QK2qCzRm3VjYKc4HNgsum8hLf
random_six = https://drive.google.com/uc?id=11K6Zu59aU7EXRcsY_ALcOJG1S2aXcVXz
ltron_assets_low = https://drive.google.com/uc?id=11p_vyeL_B_BK7gupI8_JvGGbffJ2kXiG
ltron_assets_high = https://drive.google.com/uc?id=1wIw-0YXx9QkQ9Kjpcvv5XsZFqdZrGj6U
'''
def make_settings_cfg(overwrite=False):
settings_path = os.path.join(ltron_home, 'settings.cfg')
if not os.path.exists(settings_path) or overwrite:
print('Writing default settings file to: %s'%settings_path)
with open(settings_path, 'w') as f:
f.write(default_settings_cfg)
else:
print('Settings file already exists: %s'%settings_path)
| python |
# Prepare U-Net training data
from scipy import ndimage as ndi
import numpy
import cv2
MASK_MARGIN = 5
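# make_mask rasterizes a filled disc of radius 2 * v_diam around v_center into a
# height x width binary mask (MASK_MARGIN only pads the scanned bounding box); the
# result is the per-slice segmentation target used for U-Net training.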
def make_mask(v_center, v_diam, width, height):
mask = numpy.zeros([height, width])
v_xmin = numpy.max([0, int(v_center[0] - v_diam) - MASK_MARGIN])
v_xmax = numpy.min([width - 1, int(v_center[0] + v_diam) + MASK_MARGIN])
v_ymin = numpy.max([0, int(v_center[1] - v_diam) - MASK_MARGIN])
v_ymax = numpy.min([height - 1, int(v_center[1] + v_diam) + MASK_MARGIN])
v_xrange = range(v_xmin, v_xmax + 1)
v_yrange = range(v_ymin, v_ymax + 1)
for v_x in v_xrange:
for v_y in v_yrange:
p_x = v_x
p_y = v_y
if numpy.linalg.norm(numpy.array([v_center[0], v_center[1]])\
- numpy.array([p_x, p_y]))<= v_diam * 2:
                mask[p_y, p_x] = 1.0 # set the pixels in the nodule region to 1
return mask
if __name__ == '__main__':
imagePath = './data/chaper3_img_01.png'
    # load the slice image in grayscale
img = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
print('before resize: ', img.shape)
    img_X = ndi.interpolation.zoom(img, [320/512, 320/512], mode='nearest') # resized to 320
print('after resize: ', img_X.shape)
# cv2.imwrite('./temp_dir/chapter3_img_XX.png', img_X)
    img_Y = make_mask((217, 160), 3, 320, 320) # nodule center/diameter are given by the annotation file
img_Y[img_Y < 0.5] = 0
img_Y[img_Y > 0.5] = 255
nodule_mask = img_Y.astype('uint8')
# cv2.imwrite('./temp_dir/chapter3_img_Y.png', img_Y)
| python |
import numpy as np
import pymarketstore as pymkts
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from pymarketstore.proto import marketstore_pb2_grpc
from pymarketstore.proto.marketstore_pb2 import MultiQueryRequest, QueryRequest
def test_grpc_client_init():
c = pymkts.GRPCClient("127.0.0.1:5995")
assert c.endpoint == "127.0.0.1:5995"
assert isinstance(c.stub, marketstore_pb2_grpc.MarketstoreStub)
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_query(stub):
# --- given ---
c = pymkts.GRPCClient()
p = pymkts.Params('BTC', '1Min', 'OHLCV')
# --- when ---
c.query(p)
# --- then ---
assert c.stub.Query.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_create(stub):
# --- given ---
c = pymkts.GRPCClient()
dtype = [('Epoch', 'i8'), ('Bid', 'f4'), ('Ask', 'f4')]
tbk = 'TEST/1Min/TICK'
# --- when ---
c.create(tbk=tbk, dtype=dtype, isvariablelength=False)
# --- then ---
assert c.stub.Create.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_write(stub):
# --- given ---
c = pymkts.GRPCClient()
data = np.array([(1, 0)], dtype=[('Epoch', 'i8'), ('Ask', 'f4')])
tbk = 'TEST/1Min/TICK'
# --- when ---
c.write(data, tbk)
# --- then ---
assert c.stub.Write.called == 1
def test_build_query():
# --- given ---
c = pymkts.GRPCClient(endpoint="127.0.0.1:5995")
p = pymkts.Params('TSLA', '1Min', 'OHLCV', 1500000000, 4294967296)
# --- when ---
query = c.build_query([p])
# --- then ---
assert query == MultiQueryRequest(
requests=[QueryRequest(destination="TSLA/1Min/OHLCV", epoch_start=1500000000, epoch_end=4294967296)])
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_list_symbols(stub):
# --- given ---
c = pymkts.GRPCClient()
# --- when ---
c.list_symbols()
# --- then ---
assert c.stub.ListSymbols.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_destroy(stub):
# --- given ---
c = pymkts.GRPCClient()
tbk = 'TEST/1Min/TICK'
# --- when ---
c.destroy(tbk)
# --- then ---
assert c.stub.Destroy.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_server_version(stub):
# --- given ---
c = pymkts.GRPCClient()
# --- when ---
c.server_version()
# --- then ---
assert c.stub.ServerVersion.called == 1
| python |
import unittest
import pathlib
import wellcad.com
from ._extra_asserts import ExtraAsserts
from ._sample_path import SamplePath
class TestLithoPattern(unittest.TestCase, ExtraAsserts, SamplePath):
@classmethod
def setUpClass(cls):
cls.app = wellcad.com.Application()
cls.sample_path = cls._find_sample_path()
cls.borehole = cls.app.open_borehole(str(cls.sample_path / "Core Description.wcl"))
cls.litho_log = cls.borehole.get_log("lithology")
cls.dict = cls.litho_log.litho_dictionary
cls.pattern = cls.dict.litho_pattern(0)
@classmethod
def tearDownClass(cls):
cls.app.quit(False)
def test_code(self):
self.assertAttrEqual(self.pattern, "code", '#5')
def test_description(self):
self.assertAttrEqual(self.pattern, "description", 'Sand Color')
def test_width(self):
self.assertAlmostEqual(self.pattern.width, 20, 3)
def test_height(self):
self.assertAlmostEqual(self.pattern.height, 20, 3)
def test_repeatable(self):
self.assertEqual(self.pattern.repeatable, True)
if __name__ == '__main__':
unittest.main()
| python |
import os
from flask_apispec import MethodResource
from flask_apispec import doc
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from decorator.catch_exception import catch_exception
from decorator.log_request import log_request
from decorator.verify_admin_access import verify_admin_access
class GetMailContent(MethodResource, Resource):
def __init__(self, db):
self.db = db
@log_request
@doc(tags=['mail'],
description='Get the HTML content of the specified mail template name (new_account or reset_password)',
responses={
"200": {},
"404": {"description": "This mail template does not exist"},
})
@jwt_required
@verify_admin_access
@catch_exception
def get(self, name):
if name in ["new_account", "reset_password"]:
with open(os.path.join(os.path.dirname(__file__), "..", "..", "template", f"{name}.html"), "r") as f:
data = f.read()
else:
return "", "404 This mail template does not exist"
return data, "200 "
| python |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
from struct import unpack
INPUT_FILENAME = sys.argv[1]
OUTPUT_FILENAME = sys.argv[2]
with open(INPUT_FILENAME, "rb") as f:
data = f.read()
words = len(data) // 4
if len(data) % 4 != 0:
print("Warning: input length not word aligned")
str = "<L%i" % words
print("Data length %i" % len(data))
data = unpack("<%iL" % words, data)
str = "analyzer = (\n "
count = 0
for val in data:
if count % 8 == 7:
str += "0x{:08x},\n ".format(val)
else:
str += "0x{:08x}, ".format(val)
count += 1
str += "\n )"
data = str
with open(OUTPUT_FILENAME, "w") as f:
f.write(data)
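# The generated file holds a single Python tuple literal, e.g. (hex values illustrative):
#   analyzer = (
#       0x12345678, 0x9abcdef0, ...
#       )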
| python |
#!/usr/bin/env python
import cv2
from argparse import ArgumentParser
from time import time
from core.detectors import CornerNet_Saccade, CornerNet_Squeeze
from core.vis_utils import draw_bboxes
def main(args):
cam = cv2.VideoCapture(args.device)
if args.codec == 'YUY2':
cam.set(cv2.CAP_PROP_FOURCC, 844715353.0)
elif args.codec == 'MJPG':
cam.set(cv2.CAP_PROP_FOURCC, 0x47504A4D)
else:
print('use default video codec.')
if args.resolution:
cam.set(cv2.CAP_PROP_FRAME_WIDTH, args.resolution[0])
cam.set(cv2.CAP_PROP_FRAME_HEIGHT,args.resolution[1])
detector = CornerNet_Squeeze(model_name=args.model) if args.model else CornerNet_Squeeze()
frame_count = 0
init_time = time()
tic = time()
try:
while True:
# Capture frame-by-frame
if cam.grab():
_, frame = cam.retrieve()
bboxes = detector(frame)
frame = draw_bboxes(frame, bboxes)
toc = time()
frame_count += 1
else:
continue
# Calculate fps
if toc - init_time > 3:
fps = frame_count / (toc - tic)
print('{:.2f}: {} x {} @ {:5.1f}'.format(time(), frame.shape[1], frame.shape[0], fps))
if toc -tic > 3:
tic = time()
frame_count = 0
# Show the resulting frame
if args.visual:
frame = cv2.resize(frame, (0, 0), fx=args.scale, fy=args.scale)
cv2.imshow('/dev/video{}'.format(args.device), frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except KeyboardInterrupt:
print('\nKeyboardInterrupt')
pass
# When everything done, release the capture
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-d', '--device', help='device number: /dev/video#', type=int, default=0)
parser.add_argument('-c', '--codec', help='video codec: MJPG/YUY2')
parser.add_argument('-v', '--visual', action='store_true', dest='visual', help='Show image frame')
parser.add_argument('-r', '--resolution', nargs='+', type=float, help='resolution: w, h')
parser.add_argument('-s', '--scale', type=float, help='output frame scale: [0.25]', default=0.25)
parser.add_argument('-m', '--model', type=str, help='model name')
args = parser.parse_args()
main(args)
| python |
import maya.cmds as cmds
import maya.api.OpenMaya as apiOpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import json
import os
import math
import sys
import re
import struct
from collections import OrderedDict
from copy import deepcopy
import maya.OpenMaya as OpenMaya
######################################################
# PluginFunctions
######################################################
class PluginFunctions():
######################################################
# getAllNodesOfType
######################################################
@staticmethod
def getAllNodesOfType(typeId):
list = cmds.ls( type='transform', long=True )
result = []
for node in list:
# find type attribute
sphAttr = cmds.listAttr(node, string="SPH_Type")
if sphAttr != None:
sphtype = cmds.getAttr(node + ".SPH_Type")
if typeId == sphtype:
result.append(node)
return result
######################################################
# getShape
######################################################
@staticmethod
def getShape(nodeName):
return cmds.listRelatives(nodeName, shapes=True, type="shape")
######################################################
# get quaternion of a transform node
######################################################
@staticmethod
def getQuaternion(node):
sel_list = apiOpenMaya.MSelectionList()
sel_list.add(node)
obj = sel_list.getDependNode(0)
xform = apiOpenMaya.MFnTransform(obj)
quat = xform.rotation(asQuaternion=True)
quat.normalizeIt()
return quat
######################################################
# get axis,angle of a transform node
######################################################
@staticmethod
def getAxisAngle(node):
sel_list = apiOpenMaya.MSelectionList()
sel_list.add(node)
obj = sel_list.getDependNode(0)
xform = apiOpenMaya.MFnTransform(obj)
quat = xform.rotation(asQuaternion=True)
quat.normalizeIt()
aa = quat.asAxisAngle()
return ([aa[0][0], aa[0][1], aa[0][2]], aa[1])
@staticmethod
def createFloatAttr(longName, shortName, defaultValue, softMin, softMax, minValue=0, maxValue=1000000):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.kFloat, defaultValue )
nAttr.setStorable(1)
nAttr.setMin(minValue)
nAttr.setMax(maxValue)
nAttr.setSoftMin(softMin)
nAttr.setSoftMax(softMax)
return newAttr
@staticmethod
def createIntAttr(longName, shortName, defaultValue, softMin, softMax, minValue=0, maxValue=1000000):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.kInt, defaultValue )
nAttr.setStorable(1)
nAttr.setMin(minValue)
nAttr.setMax(maxValue)
nAttr.setSoftMin(softMin)
nAttr.setSoftMax(softMax)
return newAttr
@staticmethod
def createBoolAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.kBoolean, defaultValue )
nAttr.setStorable(1)
return newAttr
@staticmethod
def createVec3Attr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.k3Float )
nAttr.setDefault(defaultValue[0], defaultValue[1], defaultValue[2])
nAttr.setStorable(1)
return newAttr
@staticmethod
def createColorAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.createColor( longName, shortName )
nAttr.setDefault(defaultValue[0], defaultValue[1], defaultValue[2])
nAttr.setStorable(1)
return newAttr
@staticmethod
def createVec3iAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.k3Int )
nAttr.setDefault(defaultValue[0], defaultValue[1], defaultValue[2])
nAttr.setStorable(1)
return newAttr
@staticmethod
def createEnumAttr(longName, shortName, defaultValue, enumList):
eAttr = OpenMaya.MFnEnumAttribute()
newAttr = eAttr.create( longName, shortName, defaultValue)
i=0
for item in enumList:
eAttr.addField(item, i)
i+=1
eAttr.setStorable(1)
return newAttr
@staticmethod
def createStringAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnTypedAttribute()
sData = OpenMaya.MFnStringData()
default = sData.create(defaultValue)
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnData.kString, default )
nAttr.setStorable(1)
return newAttr
######################################################
# createBoolParam
######################################################
@staticmethod
def createBoolParam(name, label, description, defaultValue):
param = {
"type": "bool",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createFloatParam
######################################################
@staticmethod
def createFloatParam(name, label, description, defaultValue, minValue, maxValue, fieldMin=0, fieldMax=1000000):
param = {
"type": "float",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"min": minValue,
"max": maxValue,
"fieldMin": fieldMin,
"fieldMax": fieldMax,
"ctrlId": None
}
return param
######################################################
# createVec3Param
######################################################
@staticmethod
def createVec3Param(name, label, description, defaultValue):
param = {
"type": "vec3",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createColorParam
######################################################
@staticmethod
def createColorParam(name, label, description, defaultValue):
param = {
"type": "color",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createVec3iParam
######################################################
@staticmethod
def createVec3iParam(name, label, description, defaultValue):
param = {
"type": "vec3i",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createIntParam
######################################################
@staticmethod
def createIntParam(name, label, description, defaultValue, minValue, maxValue, fieldMin=0, fieldMax=1000000):
param = {
"type": "int",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"min": minValue,
"max": maxValue,
"fieldMin": fieldMin,
"fieldMax": fieldMax,
"ctrlId": None
}
return param
######################################################
# createStringParam
######################################################
@staticmethod
def createStringParam(name, label, description, defaultValue):
param = {
"type": "string",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createEnumParam
######################################################
@staticmethod
def createEnumParam(name, label, description, defaultValue, enumList):
param = {
"type": "enum",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"enumList": enumList,
"ctrlId": None
}
return param
######################################################
# getSelectedTransforms
# get all selected transform nodes recursively
######################################################
@staticmethod
def getSelectedTransforms():
list = cmds.ls( selection=True, type='transform', long=True )
transformNodes = []
for item in list:
transformNodes.append(item)
children = cmds.listRelatives(item, ad=True, type="transform")
if children == None:
continue
for child in children:
transformNodes.append(child)
return transformNodes
######################################################
# createCircularEmitter
######################################################
class createCircularEmitterCmd(OpenMayaMPx.MPxCommand):
s_name = "createCircularEmitter"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return createCircularEmitterCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
self.cyl = cmds.polyCylinder(name="CircularEmitter", r=1, h=0.2, sx=20, sy=1, sz=1, ax=[1,0,0], rcp=0, cuv=3, ch=1)
cmds.delete(ch=True)
node = self.cyl[0]
cmds.delete(node + ".f[40:59]")
cmds.scale(0.5, 0.5, 0.5, self.cyl[0])
# set type
cmds.addAttr(node, longName="SPH_Type", niceName="type",dt="string", hidden=True)
cmds.setAttr((node + '.SPH_Type'), "CircularEmitter", type="string")
# velocity
cmds.addAttr(node, longName="SPH_velocity", niceName="velocity", at="float");
cmds.setAttr((node + '.SPH_velocity'), 1.0)
# start time
cmds.addAttr(node, longName="SPH_startTime", niceName="start time", at="float");
cmds.setAttr((node + '.SPH_startTime'), 0.0)
        # end time
cmds.addAttr(node, longName="SPH_endTime", niceName="end time", at="float");
cmds.setAttr((node + '.SPH_endTime'), 100000.0)
# fluid id
cmds.addAttr(node, longName="SPH_fluidId", niceName="Fluid id", dt="string")
cmds.setAttr((node + '.SPH_fluidId'), "Fluid", type="string")
def undoIt(self):
pass
def isUndoable(self):
return True
######################################################
# RectangularEmitter
######################################################
class createRectangularEmitterCmd(OpenMayaMPx.MPxCommand):
s_name = "createRectangularEmitter"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return createRectangularEmitterCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
self.cube = cmds.polyCube(name="RectangularEmitter", w=0.2, h=1, d=1, sx=1, sy=1, sz=1, ch=1)
cmds.delete(ch=True)
node = self.cube[0]
cmds.delete(node + ".f[4]")
# set type
cmds.addAttr(node, longName="SPH_Type", niceName="type",dt="string", hidden=True)
cmds.setAttr((node + '.SPH_Type'), "RectangularEmitter", type="string")
# velocity
cmds.addAttr(node, longName="SPH_velocity", niceName="velocity", at="float");
cmds.setAttr((node + '.SPH_velocity'), 1.0)
# start time
cmds.addAttr(node, longName="SPH_startTime", niceName="start time", at="float");
cmds.setAttr((node + '.SPH_startTime'), 0.0)
# end time
cmds.addAttr(node, longName="SPH_endTime", niceName="end time", at="float");
cmds.setAttr((node + '.SPH_endTime'), 100000.0)
# fluid id
cmds.addAttr(node, longName="SPH_fluidId", niceName="Fluid id", dt="string")
cmds.setAttr((node + '.SPH_fluidId'), "Fluid", type="string")
def undoIt(self):
pass
def isUndoable(self):
return True
######################################################
# AnimationField
######################################################
class createAnimationFieldCmd(OpenMayaMPx.MPxCommand):
s_name = "createAnimationField"
s_shortTypeFlag = '-s'
s_longTypeFlag = '-shape'
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def syntaxCreator():
syntax = OpenMaya.MSyntax()
syntax.addFlag( createAnimationFieldCmd.s_shortTypeFlag, createAnimationFieldCmd.s_longTypeFlag, OpenMaya.MSyntax.kLong )
return syntax
@staticmethod
def creator():
return createAnimationFieldCmd()
def doIt(self, args):
argData = OpenMaya.MArgParser( self.syntax(), args )
self.shapeType = 0
if argData.isFlagSet( createAnimationFieldCmd.s_shortTypeFlag ):
self.shapeType = argData.flagArgumentInt(createAnimationFieldCmd.s_shortTypeFlag, 0)
self.redoIt()
def redoIt(self):
poly = ""
if self.shapeType == 1:
poly = cmds.polySphere(name="AnimationField", r=1, sx=20, sy=20, ax=[0,1,0], cuv=2, ch=1)
cmds.expression(s=poly[0] + ".scaleY=" + poly[0] + ".scaleZ=" + poly[0] + ".scaleX;", o=poly[0])
elif self.shapeType == 2:
poly = cmds.polyCylinder(name="AnimationField", r=1, h=1, sx=20, sy=1, ax=[1,0,0], cuv=3, rcp=0, ch=1)
cmds.expression(s=poly[0] + ".scaleZ=" + poly[0] + ".scaleY;", o=poly[0])
else:
poly = cmds.polyCube(name="AnimationField", w=1, h=1, d=1, sx=1, sy=1, sz=1, ch=1)
cmds.delete(ch=True)
node = poly[0]
# set type
cmds.addAttr(node, longName="SPH_shapeType", niceName="shape type", at="long", hidden=True)
cmds.setAttr((node + '.SPH_shapeType'), self.shapeType)
# set type
cmds.addAttr(node, longName="SPH_Type", niceName="type",dt="string", hidden=True)
cmds.setAttr((node + '.SPH_Type'), "AnimationField", type="string")
# set particle field
cmds.addAttr(node, longName="SPH_particleField", niceName="paricle field",dt="string")
cmds.setAttr((node + '.SPH_particleField'), "velocity", type="string")
# set expression
cmds.addAttr(node, longName="SPH_expressionX", niceName="expression - x",dt="string")
cmds.setAttr((node + '.SPH_expressionX'), "", type="string")
cmds.addAttr(node, longName="SPH_expressionY", niceName="expression - y",dt="string")
cmds.setAttr((node + '.SPH_expressionY'), "", type="string")
cmds.addAttr(node, longName="SPH_expressionZ", niceName="expression - z",dt="string")
cmds.setAttr((node + '.SPH_expressionZ'), "", type="string")
def undoIt(self):
pass
def isUndoable(self):
return True
######################################################
# convertToFluid
#
# Converts a list of transform nodes to fluid models.
# Only nodes with a shape are converted.
######################################################
class convertToFluidCmd(OpenMayaMPx.MPxCommand):
s_name = "convertToFluid"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return convertToFluidCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
nodes = PluginFunctions.getSelectedTransforms()
self.convertToFluid(nodes)
def convertToFluid(self, nodes):
for node in nodes:
shapeNode = PluginFunctions.getShape(node)
if shapeNode != None:
lst = cmds.listRelatives(node, children=True, type='SPHFluidNode' )
if (lst == None):
cmds.createNode("SPHFluidNode", name="SPH_Fluid", parent=node)
else:
print("The node " + node + " is already an SPH fluid.")
######################################################
# convertToRigidBody
#
# Converts a list of transform nodes to rigid bodies.
# Only nodes with a shape are converted.
######################################################
class convertToRigidBodiesCmd(OpenMayaMPx.MPxCommand):
s_name = "convertToRigidBodies"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return convertToRigidBodiesCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
nodes = PluginFunctions.getSelectedTransforms()
self.convertToRigidBody(nodes)
def convertToRigidBody(self, nodes):
for node in nodes:
shapeNode = PluginFunctions.getShape(node)
if shapeNode != None:
lst = cmds.listRelatives(node, children=True, type='SPHRigidBodyNode' )
if (lst == None):
cmds.createNode("SPHRigidBodyNode", name="SPH_Rigid_Body", parent=node)
else:
print("The node " + node + " is already an SPH rigid body.")
######################################################
# saveModel
######################################################
class saveModelCmd(OpenMayaMPx.MPxCommand):
s_name = "saveModel"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return saveModelCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
sphConfigList = cmds.ls( type='SPHConfigurationNode', long=True )
if len(sphConfigList) == 0:
cmds.warning("Not saved since no SPH configuration node was found.")
return
if not cmds.pluginInfo("objExport", query=True, loaded=True):
cmds.loadPlugin("objExport")
fileName = cmds.fileDialog2(ff="*.json", fm=0, dir="")
scenePath = os.path.dirname(fileName[0])
scene=self.generateScene(scenePath)
if scene == None:
return
f = open(fileName[0], 'w')
json_str = json.dumps(scene, sort_keys=True,indent=4, separators=(',', ': '))
f.write(json_str)
f.close()
def isUndoable(self):
return False
######################################################
# openFluidIdDialog
######################################################
def openFluidIdDialog(self):
sphConfigList = cmds.ls( type='SPHConfigurationNode', long=False )
cmds.columnLayout( adjustableColumn=True, columnOffset=["both", 10], rowSpacing=10, columnAlign="center" )
cmds.textScrollList("SPHFluidIdList", numberOfRows=8, allowMultiSelection=False,
append=sphConfigList,
selectItem=sphConfigList[0], showIndexedItem=1)
cmds.rowLayout(numberOfColumns=2)
cmds.button("Ok", c='cmds.layoutDialog( dismiss="Ok " + cmds.textScrollList("SPHFluidIdList",q=True,selectItem=True)[0] )' )
cmds.button("Cancel", c='cmds.layoutDialog( dismiss="Cancel" )')
######################################################
# generate scene
######################################################
def generateScene(self, scenePath):
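# Assemble a SPlisHSPlasH-style JSON scene description from the SPH nodes in
# the Maya scene: configuration, fluid materials, rigid bodies, fluid models,
# emitters and animation fields. Returns None if a required node is missing
# or the user cancels the configuration selection dialog.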
scene = OrderedDict()
scene['FluidModels'] = []
scene['RigidBodies'] = []
scene['Emitters'] = []
scene['AnimationFields'] = []
scene['Materials'] = []
scene['Configuration'] = OrderedDict()
sphConfigList = cmds.ls( type='SPHConfigurationNode', long=True )
sphConfig = ""
if len(sphConfigList) == 0:
cmds.warning("Not saved since no SPH configuration node was found.")
return None
elif len(sphConfigList) > 1:
sphConfig = sphConfigList[0]
res = cmds.layoutDialog(ui=self.openFluidIdDialog)
if res == "Cancel":
return None
else:
sphConfig = res[3:]
else:
sphConfig = sphConfigList[0]
#cmds.warning("More than one SPH configuration node was found using " + sphConfigList[0] + ".")
attributes = cmds.listAttr(sphConfig, string="SPH_*", sn=False)
for attr in attributes:
if cmds.getAttr(sphConfig + "." + attr, type=True) == "float3":
value = cmds.getAttr(sphConfig + "." + attr)[0]
elif cmds.getAttr(sphConfig + "." + attr, type=True) == "long3":
value = cmds.getAttr(sphConfig + "." + attr)[0]
else:
value = cmds.getAttr(sphConfig + "." + attr)
# avoid writing child attributes of compound attributes
parent = cmds.attributeQuery( attr, node=sphConfig, listParent=True )
if parent == None:
scene["Configuration"][attr[4:]] = value
fluidConfigList = cmds.ls( type='SPHFluidConfigurationNode', long=False )
if len(fluidConfigList) == 0:
cmds.warning("Not saved since no fluid material node was found.")
return
for fluid in fluidConfigList:
attributes = cmds.listAttr(fluid, string="SPH_*", sn=False)
mat = OrderedDict()
mat['id'] = fluid
for attr in attributes:
if cmds.getAttr(fluid + "." + attr, type=True) == "float3":
value = cmds.getAttr(fluid + "." + attr)[0]
elif cmds.getAttr(fluid + "." + attr, type=True) == "long3":
value = cmds.getAttr(fluid + "." + attr)[0]
else:
value = cmds.getAttr(fluid + "." + attr)
mat[attr[4:]] = value
scene["Materials"].append(mat)
rigidBodyList = cmds.ls( type='SPHRigidBodyNode', long=False )
for rb in rigidBodyList:
self.addRigidBody(scene, rb, scenePath)
fluidList = cmds.ls( type='SPHFluidNode', long=False )
for fluid in fluidList:
self.addFluid(scene, fluid, scenePath)
emitters = PluginFunctions.getAllNodesOfType("RectangularEmitter")
for emitter in emitters:
self.addRectangularEmitter(sphConfig, scene, emitter, scenePath)
emitters = PluginFunctions.getAllNodesOfType("CircularEmitter")
for emitter in emitters:
self.addCircularEmitter(sphConfig, scene, emitter, scenePath)
animFields = PluginFunctions.getAllNodesOfType("AnimationField")
for animField in animFields:
self.addAnimationField(sphConfig, scene, animField, scenePath)
return scene
######################################################
# getCurrentParticleRadius
######################################################
def getCurrentParticleRadius(self, sphConfig):
return cmds.getAttr(sphConfig + ".particleRadius")
######################################################
# add rigid bodies
######################################################
def addRigidBody(self, scene, rbNode, scenePath):
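# Triangulate the parent mesh, export it as an OBJ file next to the scene
# file and collect the node's SPH_* attributes into the scene description.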
# export geometry
tr = cmds.listRelatives( rbNode, allParents=True )
cmds.select(tr, replace=True)
# export geometry
polyTri = cmds.polyTriangulate()
name = cmds.ls( selection=True, type='transform', long=False )[0]
fileName = os.path.join(scenePath, "rb_" + name + ".obj")
cmds.file(fileName, force=True, options="groups=0;ptgroups=0;materials=0;smoothing=0;normals=0", pr=True, exportSelected=True, type="OBJexport")
cmds.delete(polyTri)
attributes = cmds.listAttr(rbNode, string="SPH_*", sn=False)
rb = OrderedDict()
for attr in attributes:
if cmds.getAttr(rbNode + "." + attr, type=True) == "float3":
value = cmds.getAttr(rbNode + "." + attr)[0]
elif cmds.getAttr(rbNode + "." + attr, type=True) == "long3":
value = cmds.getAttr(rbNode + "." + attr)[0]
else:
value = cmds.getAttr(rbNode + "." + attr)
# avoid writing child attributes of compound attributes
parent = cmds.attributeQuery( attr, node=rbNode, listParent=True )
if parent == None:
rb[attr[4:]] = value
rb['translation'] = [0,0,0]
rb['rotationaxis'] = [1,0,0]
rb['rotationangle'] = 0.0
rb['scale'] = [1,1,1]
rb['geometryFile'] = "rb_" + name + ".obj"
scene['RigidBodies'].append(rb)
#color = cmds.getAttr(rbNode + ".SPH_color")[0]
#color = color + (1.0,)
######################################################
# add fluid
######################################################
def addFluid(self, scene, fluidNode, scenePath):
# export geometry
tr = cmds.listRelatives( fluidNode, allParents=True )
cmds.select(tr, replace=True)
particleFile = cmds.getAttr(fluidNode + ".particleFile")
name = ""
if (particleFile == ""):
polyTri = cmds.polyTriangulate()
name = cmds.ls( selection=True, type='transform', long=False )[0]
fileName = os.path.join(scenePath, "fluid_" + name + ".obj")
cmds.file(fileName, force=True, options="groups=0;ptgroups=0;materials=0;smoothing=0;normals=0", pr=True, exportSelected=True, type="OBJexport")
cmds.delete(polyTri)
attributes = cmds.listAttr(fluidNode, string="SPH_*", sn=False)
fluid = OrderedDict()
for attr in attributes:
if cmds.getAttr(fluidNode + "." + attr, type=True) == "float3":
value = cmds.getAttr(fluidNode + "." + attr)[0]
elif cmds.getAttr(fluidNode + "." + attr, type=True) == "long3":
value = cmds.getAttr(fluidNode + "." + attr)[0]
else:
value = cmds.getAttr(fluidNode + "." + attr)
# avoid writing child attributes of compound attributes
parent = cmds.attributeQuery( attr, node=fluidNode, listParent=True )
if parent == None:
fluid[attr[4:]] = value
if (particleFile == ""):
fluid['particleFile'] = "fluid_" + name + ".obj"
fluid['translation'] = [0,0,0]
fluid['rotationaxis'] = [1,0,0]
fluid['rotationangle'] = 0.0
fluid['scale'] = [1,1,1]
scene['FluidModels'].append(fluid)
######################################################
# add rectangular emitter
######################################################
def addRectangularEmitter(self, sphConfig, scene, node, scenePath):
t = cmds.xform(node, query=True, t=True, ws=True)
s = cmds.xform(node, query=True, s=True, ws=True)
# get particleRadius
radius = self.getCurrentParticleRadius(sphConfig)
diam = 2.0 * radius
s[1] -= 2.0*diam
s[2] -= 2.0*diam
axisAngle = PluginFunctions.getAxisAngle(node)
startTime = cmds.getAttr(node + ".SPH_startTime")
endTime = cmds.getAttr(node + ".SPH_endTime")
velocity = cmds.getAttr(node + ".SPH_velocity")
id = cmds.getAttr(node + ".SPH_fluidId")
emitter = {
'id': id,
'width': int(s[2]/diam),
'height': int(s[1]/diam),
'translation': t,
'rotationAxis': axisAngle[0],
'rotationAngle': axisAngle[1],
'emitStartTime': startTime,
'emitEndTime': endTime,
'velocity' : velocity,
'type' : 0
}
scene['Emitters'].append(emitter)
######################################################
# add circular emitter
######################################################
def addCircularEmitter(self, sphConfig, scene, node, scenePath):
t = cmds.xform(node, query=True, t=True, ws=True)
s = cmds.xform(node, query=True, s=True, ws=True)
# get particleRadius
radius = self.getCurrentParticleRadius(sphConfig)
s[1] -= 2.0*radius
axisAngle = PluginFunctions.getAxisAngle(node)
startTime = cmds.getAttr(node + ".SPH_startTime")
endTime = cmds.getAttr(node + ".SPH_endTime")
velocity = cmds.getAttr(node + ".SPH_velocity")
id = cmds.getAttr(node + ".SPH_fluidId")
emitter = {
'id': id,
'width': int(s[1]/radius),
'translation': t,
'rotationAxis': axisAngle[0],
'rotationAngle': axisAngle[1],
'emitStartTime': startTime,
'emitEndTime': endTime,
'velocity' : velocity,
'type' : 1
}
scene['Emitters'].append(emitter)
######################################################
# add animation field
######################################################
def addAnimationField(self, sphConfig, scene, node, scenePath):
t = cmds.xform(node, query=True, t=True, ws=True)
s = cmds.xform(node, query=True, s=True, ws=True)
axisAngle = PluginFunctions.getAxisAngle(node)
particleField = cmds.getAttr(node + ".SPH_particleField")
shapeType = cmds.getAttr(node + ".SPH_shapeType")
expression_x = cmds.getAttr(node + ".SPH_expressionX")
expression_y = cmds.getAttr(node + ".SPH_expressionY")
expression_z = cmds.getAttr(node + ".SPH_expressionZ")
animField = {
'particleField': particleField,
'translation': t,
'rotationAxis': axisAngle[0],
'rotationAngle': axisAngle[1],
'scale': s,
'shapeType': shapeType,
'expression_x' : expression_x,
'expression_y' : expression_y,
'expression_z' : expression_z
}
scene['AnimationFields'].append(animField)
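# Create one Maya attribute per SPH parameter definition; long names are
# prefixed with "SPH_" so they can later be collected via listAttr("SPH_*").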
def addAttributesToSPHNode(node):
# add attributes
for key in node.sphParameters:
params = node.sphParameters[key]
for param in params:
paramType = param["type"]
paramName = param["name"]
paramLabel = param["label"]
if paramType == "bool":
attr = PluginFunctions.createBoolAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "float":
attr = PluginFunctions.createFloatAttr("SPH_" + paramName, paramName, param["value"], param["min"], param["max"], param["fieldMin"], param["fieldMax"])
node.addAttribute( attr )
elif paramType == "int":
attr = PluginFunctions.createIntAttr("SPH_" + paramName, paramName, param["value"], param["min"], param["max"], param["fieldMin"], param["fieldMax"])
node.addAttribute( attr )
elif paramType == "vec3":
attr = PluginFunctions.createVec3Attr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "color":
attr = PluginFunctions.createColorAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "vec3i":
attr = PluginFunctions.createVec3iAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "enum":
attr = PluginFunctions.createEnumAttr("SPH_" + paramName, paramName, param["value"], param["enumList"])
node.addAttribute( attr )
elif paramType == "string":
attr = PluginFunctions.createStringAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
# Node definition
class SPHConfigurationNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90000)
kPluginNodeTypeName = "SPHConfigurationNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxLocatorNode.postConstructor(self)
# initializer
@staticmethod
def initialize():
SPHConfigurationNode.initParameters()
addAttributesToSPHNode(SPHConfigurationNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHConfigurationNode() )
def compute(self,plug,dataBlock):
# if ( plug == SPHConfigurationNode.output ):
# dataHandle = dataBlock.inputValue( SPHConfigurationNode.input )
# inputFloat = dataHandle.asFloat()
# result = math.sin( inputFloat ) * 10.0
# outputHandle = dataBlock.outputValue( SPHConfigurationNode.output )
# outputHandle.setFloat( result )
# dataBlock.setClean( plug )
return OpenMaya.kUnknownParameter
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHConfigurationNode.sphParameters["General"] = [
PluginFunctions.createBoolParam("pause", "Pause", "Pause simulation after loading.", True),
PluginFunctions.createFloatParam("timeStepSize", "Time step size", "Time step size", 0.001, 0.00001, 1.0),
PluginFunctions.createFloatParam("pauseAt", "Pause simulation at", "Pause simulation at the given time. When the value is negative, the simulation is not paused.", -1, -1, 100, -1),
PluginFunctions.createFloatParam("stopAt", "Stop simulation at", "Stop simulation at the given time. When the value is negative, the simulation is not stopped.", -1, -1, 100, -1)
]
SPHConfigurationNode.sphParameters["Visualization"] = [
PluginFunctions.createVec3Param("cameraPosition", "Camera position", "Initial position of the camera.", [0.0,3.0,8.0]),
PluginFunctions.createVec3Param("cameraLookat", "Camera lookat", "Lookat point of the camera.", [0.0,0.0,0.0]),
PluginFunctions.createIntParam("numberOfStepsPerRenderUpdate", "# time steps / update", "Number of simulation steps per rendered frame.", 4, 1, 100),
PluginFunctions.createEnumParam("renderWalls", "Render walls", "Make walls visible/invisible.", 4, ["None", "Particles (all)", "Particles (no walls)", "Geometry (all)", "Geometry (no walls)"]),
]
SPHConfigurationNode.sphParameters["Export"] = [
PluginFunctions.createBoolParam("enablePartioExport", "Partio export", "Enable/disable partio export.", False),
PluginFunctions.createBoolParam("enableRigidBodyExport", "Rigid body export", "Enable/disable rigid body export.", False),
PluginFunctions.createBoolParam("enableVTKExport", "VTK export", "Enable/disable VTK export.", False),
PluginFunctions.createBoolParam("enableRigidBodyVTKExport", "Rigid body VTK export", "Enable/disable rigid body VTK export.", False),
PluginFunctions.createFloatParam("dataExportFPS", "Export FPS", "Frame rate of particle export.", 25, 0.1, 1000),
PluginFunctions.createStringParam("particleAttributes", "Export attributes", "Attributes that are exported in the particle files (except id and position).", "velocity"),
PluginFunctions.createBoolParam("enableStateExport", "State export", "Enable/disable simulation state export.", False),
PluginFunctions.createFloatParam("stateExportFPS", "State export FPS", "Frame rate of state export.", 1, 0.1, 1000)
]
SPHConfigurationNode.sphParameters["Simulation"] = [
PluginFunctions.createBoolParam("sim2D", "2D simulation", "2D/3D simulation.", False),
PluginFunctions.createBoolParam("enableZSort", "Enable z-sort", "Enable z-sort to improve cache hits.", True),
PluginFunctions.createFloatParam("particleRadius", "Particle radius", "Radius of the fluid particles.", 0.025, 0.0001, 1000.0, 0),
PluginFunctions.createVec3Param("gravitation", "Gravitation", "Vector to define the gravitational acceleration.", [0,-9.81,0]),
PluginFunctions.createEnumParam("simulationMethod", "Simulation method", "Simulation method.", 4, ["WCSPH", "PCISPH", "PBF", "IISPH", "DFSPH", "Projective Fluids"]),
PluginFunctions.createIntParam("maxIterations", "Max. iterations", "Maximal number of iterations of the pressure solver.", 100, 1, 1000, 1),
PluginFunctions.createFloatParam("maxError", "Max. density error(%)", "Maximal density error (%).", 0.01, 1.0e-6, 1.0, 0),
PluginFunctions.createEnumParam("boundaryHandlingMethod", "Boundary handling method", "Boundary handling method.", 2, ["Akinci et al. 2012", "Koschier and Bender 2017", "Bender et al. 2019"])
]
SPHConfigurationNode.sphParameters["CFL"] = [
PluginFunctions.createEnumParam("cflMethod", "CFL - method", "CFL method used for adaptive time stepping.", 1, ["None", "CFL", "CFL - iterations"]),
PluginFunctions.createFloatParam("cflFactor", "CFL - factor", "Factor to scale the CFL time step size.", 0.5, 1e-6, 10.0, 0),
PluginFunctions.createFloatParam("cflMinTimeStepSize", "CFL - min. time step size", "Min. time step size.", 0.0001, 1e-7, 1.0, 0),
PluginFunctions.createFloatParam("cflMaxTimeStepSize", "CFL - max. time step size", "Max. time step size.", 0.005, 1e-6, 1.0, 0)
]
SPHConfigurationNode.sphParameters["Kernel"] = [
PluginFunctions.createEnumParam("kernel", "Kernel", "Kernel function used in the SPH model (in 2D use only cubic or Wendland).", 4, ["Cubic spline", "Wendland quintic C2", "Poly6", "Spiky", "Precomputed cubic spline"]),
PluginFunctions.createEnumParam("gradKernel", "Gradient of kernel", "Gradient of the kernel function used in the SPH model (in 2D use only cubic or Wendland).", 4, ["Cubic spline", "Wendland quintic C2", "Poly6", "Spiky", "Precomputed cubic spline"])
]
SPHConfigurationNode.sphParameters["WCSPH"] = [
PluginFunctions.createFloatParam("stiffness", "Stiffness", "Stiffness coefficient of EOS.", 10000, 0, 500000),
PluginFunctions.createFloatParam("exponent", "Exponent (gamma)", "Exponent of EOS.", 7.0, 1.0e-6, 10.0, 0)
]
SPHConfigurationNode.sphParameters["PBF"] = [
PluginFunctions.createEnumParam("velocityUpdateMethod", "Velocity update method", "Method for the velocity integration.", 0, ["First Order Update", "Second Order Update"])
]
SPHConfigurationNode.sphParameters["DFSPH"] = [
PluginFunctions.createIntParam("maxIterationsV", "Max. iterations (divergence)", "Maximal number of iterations of the divergence solver.", 100, 1, 1000, 1),
PluginFunctions.createFloatParam("maxErrorV", "Max. divergence error(%)", "Maximal divergence error (%).", 0.01, 1.0e-6, 1.0, 0),
PluginFunctions.createBoolParam("enableDivergenceSolver", "Enable divergence solver", "Turn divergence solver on/off.", True)
]
SPHConfigurationNode.sphParameters["Projective Fluids"] = [
PluginFunctions.createFloatParam("stiffnessPF", "Stiffness", "Stiffness coefficient.", 50000, 0, 500000)
]
# Node definition
class SPHFluidConfigurationNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90001)
kPluginNodeTypeName = "SPHFluidConfigurationNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
# initializer
@staticmethod
def initialize():
SPHFluidConfigurationNode.initParameters()
addAttributesToSPHNode(SPHFluidConfigurationNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHFluidConfigurationNode() )
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHFluidConfigurationNode.sphParameters["Simulation"] = [
PluginFunctions.createFloatParam("density0", "Rest density", "Rest density of the fluid.", 1000.0, 0.1, 10000.0)
]
SPHFluidConfigurationNode.sphParameters["Visualization"] = [
PluginFunctions.createStringParam("colorField", "Color field", "Choose vector or scalar field for particle coloring.", "velocity"),
PluginFunctions.createEnumParam("colorMapType", "Color map type", "Selection of a color map for coloring the scalar/vector field.", 1, ["None", "Jet", "Plasma"]),
PluginFunctions.createFloatParam("renderMinValue", "Min. value", "Minimal value used for color-coding the color field in the rendering process.", 0, -1000, 1000, -1000000),
PluginFunctions.createFloatParam("renderMaxValue", "Max. value", "Maximal value used for color-coding the color field in the rendering process.", 5, -1000, 1000, -1000000)
]
SPHFluidConfigurationNode.sphParameters["Emitters"] = [
PluginFunctions.createIntParam("maxEmitterParticles", "Max. number of emitted particles", "Maximum number of emitted particles", 10000, 1, 10000000, 0, 100000000),
PluginFunctions.createBoolParam("emitterReuseParticles", "Reuse particles", "Reuse particles if they are outside of the bounding box defined by emitterBoxMin, emitterBoxMaRex.", False),
PluginFunctions.createVec3Param("emitterBoxMin", "Emitter box min.", "Minimum coordinates of an axis-aligned box (used in combination with emitterReuseParticles).", [0.0,0.0,0.0]),
PluginFunctions.createVec3Param("emitterBoxMax", "Emitter box max.", "Maximum coordinates of an axis-aligned box (used in combination with emitterReuseParticles).", [1.0,1.0,1.0])
]
SPHFluidConfigurationNode.sphParameters["Viscosity"] = [
PluginFunctions.createEnumParam("viscosityMethod", "Viscosity", "Method to compute viscosity forces.", 1, ["None", "Standard", "XSPH", "Bender and Koschier 2017", "Peer et al. 2015", "Peer et al. 2016", "Takahashi et al. 2015 (improved)", "Weiler et al. 2018"]),
PluginFunctions.createFloatParam("viscosity", "Viscosity coefficient", "Coefficient for the viscosity force computation.", 0.01, 0, 1000, 0),
PluginFunctions.createIntParam("viscoMaxIter", "Max. iterations (visco)", "(Implicit solvers) Max. iterations of the viscosity solver.", 100, 1, 1000),
PluginFunctions.createFloatParam("viscoMaxError", "Max. visco error", "(Implicit solvers) Max. error of the viscosity solver.", 0.01, 1e-6, 1, 0),
PluginFunctions.createIntParam("viscoMaxIterOmega", "Max. iterations (vorticity diffusion)", "(Peer et al. 2016) Max. iterations of the vorticity diffusion solver.", 100, 1, 1000),
PluginFunctions.createFloatParam("viscoMaxErrorOmega", "Max. vorticity diffusion error", "(Peer et al. 2016) Max. error of the vorticity diffusion solver.", 0.01, 1e-6, 1, 0),
PluginFunctions.createFloatParam("viscosityBoundary", "Viscosity coefficient (Boundary)", "Coefficient for the viscosity force computation at the boundary.", 0.0, 0, 1000, 0)
]
SPHFluidConfigurationNode.sphParameters["Vorticity"] = [
PluginFunctions.createEnumParam("vorticityMethod", "Vorticity method", "Method to compute vorticity forces.", 0, ["None", "Micropolar model", "Vorticity confinement"]),
PluginFunctions.createFloatParam("vorticity", "Vorticity coefficient", "Coefficient for the vorticity force computation.", 0.01, 0, 10.0, 0),
PluginFunctions.createFloatParam("viscosityOmega", "Angular viscosity coefficient", "Viscosity coefficient for the angular velocity field.", 0.1, 0, 10.0, 0),
PluginFunctions.createFloatParam("inertiaInverse", "Inertia inverse", "Inverse microinertia used in the micropolar model.", 0.5, 0, 10.0, 0)
]
SPHFluidConfigurationNode.sphParameters["Drag force"] = [
PluginFunctions.createEnumParam("dragMethod", "Drag method", "Method to compute drag forces.", 0, ["None", "Macklin et al. 2014", "Gissler et al. 2017"]),
PluginFunctions.createFloatParam("drag", "Drag coefficient", "Coefficient for the drag force computation.", 0.01, 0, 100.0, 0)
]
SPHFluidConfigurationNode.sphParameters["Surface tension"] = [
PluginFunctions.createEnumParam("surfaceTensionMethod", "Surface tension method", "Method to compute surface tension forces.", 0, ["None", "Becker & Teschner 2007", "Akinci et al. 2013", "He et al. 2014"]),
PluginFunctions.createFloatParam("surfaceTension", "Surface tension coefficient", "Coefficient for the surface tension computation.", 0.05, 0, 100.0, 0)
]
SPHFluidConfigurationNode.sphParameters["Elasticity"] = [
PluginFunctions.createEnumParam("elasticityMethod", "Elasticity method", "Method to compute elastic forces.", 0, ["None", "Becker et al. 2009", "Peer et al. 2018"]),
PluginFunctions.createFloatParam("youngsModulus", "Young's modulus", "Stiffness of the elastic material.", 100000.0, 0, 1000.0, 0),
PluginFunctions.createFloatParam("poissonsRatio", "Poisson's ratio", "Ratio of transversal expansion and axial compression.", 0.3, -0.9999, 0.4999, -0.9999),
PluginFunctions.createIntParam("elasticityMaxIter", "Max. iterations (elasticity)", "(Implicit solvers) Max. iterations of the elasticity solver.", 100, 1, 1000),
PluginFunctions.createFloatParam("elasticityMaxError", "Max. elasticity error", "(Implicit solvers) Max. error of the elasticity solver.", 0.0001, 1e-6, 1, 0),
PluginFunctions.createFloatParam("alpha", "Zero-energy modes suppression", "Coefficent for zero-energy modes suppression method.", 0.0, 0, 10000.0, 0)
]
class SPHFluidNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90002)
kPluginNodeTypeName = "SPHFluidNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxLocatorNode.postConstructor(self)
# initializer
@staticmethod
def initialize():
SPHFluidNode.initParameters()
addAttributesToSPHNode(SPHFluidNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHFluidNode() )
def compute(self,plug,dataBlock):
# if ( plug == SPHFluidNode.output ):
# dataHandle = dataBlock.inputValue( SPHFluidNode.input )
# inputFloat = dataHandle.asFloat()
# result = math.sin( inputFloat ) * 10.0
# outputHandle = dataBlock.outputValue( SPHFluidNode.output )
# outputHandle.setFloat( result )
# dataBlock.setClean( plug )
return OpenMaya.kUnknownParameter
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHFluidNode.sphParameters["General"] = [
PluginFunctions.createStringParam("id", "Fluid id", "Id of the fluid material.", "Fluid"),
PluginFunctions.createVec3Param("initialVelocity", "Initial velocity", "Initial velocity of the fluid.", [0.0,0.0,0.0]),
PluginFunctions.createVec3Param("initialAngularVelocity", "Initial angular velocity", "Initial angular velocity of the fluid.", [0.0,0.0,0.0]),
PluginFunctions.createVec3iParam("resolutionSDF", "SDF resolution", "Resolution of the SDF.", [20,20,20]),
PluginFunctions.createBoolParam("invert", "Invert SDF", "Invert the SDF, flips inside/outside.", False),
PluginFunctions.createEnumParam("denseMode", "Dense mode", "Sampling mode.", 0, ["Regular", "Almost dense", "Dense"]),
PluginFunctions.createStringParam("particleFile", "Particle sampling file", "Particle sampling file.", ""),
]
class SPHRigidBodyNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90003)
kPluginNodeTypeName = "SPHRigidBodyNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxLocatorNode.postConstructor(self)
# initializer
@staticmethod
def initialize():
SPHRigidBodyNode.initParameters()
addAttributesToSPHNode(SPHRigidBodyNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHRigidBodyNode() )
def compute(self,plug,dataBlock):
# if ( plug == SPHRigidBodyNode.output ):
# dataHandle = dataBlock.inputValue( SPHRigidBodyNode.input )
# inputFloat = dataHandle.asFloat()
# result = math.sin( inputFloat ) * 10.0
# outputHandle = dataBlock.outputValue( SPHRigidBodyNode.output )
# outputHandle.setFloat( result )
# dataBlock.setClean( plug )
return OpenMaya.kUnknownParameter
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHRigidBodyNode.sphParameters["General"] = [
PluginFunctions.createBoolParam("isDynamic", "Dynamic", "Defines if the body is static or dynamic.", False),
PluginFunctions.createBoolParam("isWall", "Wall", "Defines if this is a wall. Walls are typically not rendered. This is the only difference.", False),
PluginFunctions.createColorParam("color", "Color", "Color of the body", [0.2, 0.2, 0.2]),
PluginFunctions.createFloatParam("density", "Density", "Rest density of the body.", 1000.0, 0, 100000.0, 0),
PluginFunctions.createVec3iParam("mapResolution", "Map resolution", "Resolution of the volume/density map.", [20,20,20]),
PluginFunctions.createBoolParam("mapInvert", "Invert map", "Invert the volume/density map, flips inside/outside.", False),
PluginFunctions.createFloatParam("mapThickness", "Map thickness", "Thickness of the map.", 0.0, 0, 100.0, 0),
PluginFunctions.createVec3iParam("resolutionSDF", "SDF resolution", "Resolution of the SDF.", [20,20,20]),
PluginFunctions.createBoolParam("invert", "Invert SDF", "Invert the SDF, flips inside/outside.", False),
PluginFunctions.createEnumParam("samplingMode", "Sampling mode", "Sampling mode.", 0, ["Poisson disk sanmpling", "Regular triangle sampling"]),
]
######################################################
# loadRigidBodies
#
# load rigid body data that was exported by
# a SPH simulation
######################################################
class loadRigidBodiesCmd(OpenMayaMPx.MPxCommand):
s_name = "loadRigidBodies"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return loadRigidBodiesCmd()
def doIt(self, args):
self.addedNodes = []
fileNames = cmds.fileDialog2(ff="*.bin", fm=1, dir="")
if not fileNames:
return
self.firstFileName = fileNames[0]
indexlist = re.findall(r'\d+', self.firstFileName)
if len(indexlist) == 0:
cmds.warning("No frame index found in file name.")
return
self.firstFrame = int(indexlist[-1])
self.redoIt()
def redoIt(self):
self.loadRigidBodies()
def loadRigidBodies(self):
folderName = os.path.dirname(self.firstFileName)
frameNumber = self.firstFrame
firstFile = open(self.firstFileName, 'rb')
# read number of bodies
bytes = firstFile.read()
firstFile.close()
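# File layout as inferred from the reads below: int numBodies; per body:
# int strLength, strLength chars of OBJ file name, 3 floats scale,
# 1 byte isWall flag, 4 floats RGBA color; then per body: 3 floats
# translation and 9 floats of a 3x3 rotation matrix. Subsequent frame
# files contain only the translation/rotation block per body.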
(numBodies,), bytes = struct.unpack('i', bytes[:4]), bytes[4:]
objFiles = []
transformNodes = []
for i in range(0, numBodies):
# determine length of file name string
(strLength,), bytes = struct.unpack('i', bytes[:4]), bytes[4:]
# read file name
objFile, bytes = bytes[:strLength], bytes[strLength:]
# Check for duplicates and create instances
if objFile in objFiles:
idx = objFiles.index(objFile)
newNodes = cmds.duplicate(transformNodes[idx], instanceLeaf= True)
transformNodes.append(newNodes[0])
self.addedNodes.append(newNodes)
else:
objFileName = os.path.join(folderName, objFile)
newNodes = cmds.file(objFileName, i=True, rnn=True, type="OBJ", options="mo=1")
transformNodes.append(newNodes[0])
objFiles.append(objFile)
self.addedNodes.append(newNodes)
# Read scaling factors in first file
(sx,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(sy,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(sz,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
cmds.scale(sx, sy, sz, transformNodes[i])
(isWall,), bytes = struct.unpack('?', bytes[:1]), bytes[1:]
(colr,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(colg,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(colb,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(cola,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
if isWall:
cmds.setAttr((transformNodes[i] + '.visibility'), 0)
cmds.setKeyframe(transformNodes[i], at="s", t=1)
if frameNumber > 1:
cmds.setKeyframe(transformNodes[i], at="visibility", t=1, value=0)
if not isWall:
cmds.setKeyframe(transformNodes[i], at="visibility", t=frameNumber, value=1)
# load transformations
for i in range(0, numBodies):
# Read translation in first file
(x,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(y,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(z,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
# Read rotation in first file
r = []
for j in range(0,9):
(value,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
r.append(value)
cmds.xform(transformNodes[i], p=True, m=[r[0],r[1],r[2],0,r[3],r[4],r[5],0,r[6],r[7],r[8],0,x,y,z,1])
cmds.setKeyframe(transformNodes[i], at="t", t=frameNumber)
cmds.setKeyframe(transformNodes[i], at="r", t=frameNumber)
# read other files
idx = self.firstFileName.rfind(str(frameNumber))
l = len(str(frameNumber))
chk = True
while chk:
frameNumber += 1
fileName = str(self.firstFileName[0:idx]) + str(frameNumber) + str(self.firstFileName[idx+l:])
chk = os.path.exists(fileName)
if chk:
f = open(fileName, 'rb')
bytes = f.read()
f.close()
# load transformations
for i in range(0, numBodies):
# Read translation in file
(x,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(y,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(z,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
# Read rotation in file
r = []
for j in range(0,9):
(value,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
r.append(value)
cmds.xform(transformNodes[i], p=True, m=[r[0],r[1],r[2],0,r[3],r[4],r[5],0,r[6],r[7],r[8],0,x,y,z,1])
cmds.setKeyframe(transformNodes[i], at="t", t=frameNumber)
cmds.setKeyframe(transformNodes[i], at="r", t=frameNumber)
cmds.currentTime(1)
def undoIt(self):
for node in self.addedNodes:
print(node)
cmds.delete(node)
def isUndoable(self):
return True
######################################################
# createSPHMenu
######################################################
def createSPHMenu():
global menuId
menuId = cmds.menu( label='SPlisHSPlasH', p="MayaWindow" )
cmds.menuItem(divider=True, dividerLabel="Scene generating")
cmds.menuItem( label='Add scene configuration',command=
'if "SPH_Config" not in cmds.ls( type="transform"):\n' +
' cmds.createNode("transform", name="SPH_Config")\n' +
'cmds.createNode("SPHConfigurationNode", name="Configuration", parent="SPH_Config")')
cmds.menuItem( label='Add fluid material',command=
'if "SPH_Fluid_Material" not in cmds.ls( type="transform"):\n' +
' cmds.createNode("transform", name="SPH_Fluid_Material")\n' +
'cmds.createNode("SPHFluidConfigurationNode", name="Fluid", parent="SPH_Fluid_Material")')
cmds.menuItem(divider=True)
cmds.menuItem( label='Convert selection to fluid',command='cmds.convertToFluid()' )
cmds.menuItem( label='Convert selection to rigid bodies',command='cmds.convertToRigidBodies()' )
cmds.menuItem(divider=True)
cmds.menuItem( label='Create rectangular emitter',command='cmds.createRectangularEmitter()' )
cmds.menuItem( label='Create circular emitter',command='cmds.createCircularEmitter()' )
cmds.menuItem(divider=True)
cmds.menuItem( label='Create box animation field',command='cmds.createAnimationField(s=0)' )
cmds.menuItem( label='Create sphere animation field',command='cmds.createAnimationField(s=1)' )
cmds.menuItem( label='Create cylinder animation field',command='cmds.createAnimationField(s=2)' )
cmds.menuItem(divider=True)
cmds.menuItem( label='Save scene',command='cmds.saveModel()' )
cmds.menuItem(divider=True, dividerLabel="Import")
cmds.menuItem( label='Load rigid body data',command='cmds.loadRigidBodies()' )
######################################################
# deleteSPHMenu
######################################################
def deleteSPHMenu():
global menuId
cmds.deleteUI(menuId)
return
# Initialize the script plug-in
def initializePlugin(mobject):
global settingsWinId
global fluidWinId
global menuId
global fluidIds
global sphParameters
global fluidParameters
mplugin = OpenMayaMPx.MFnPlugin(mobject, "SPlisHSPlasH", "1.0", "Any")
settingsWinId = ""
fluidWinId = ""
menuId = ""
fluidIds = ["Fluid"]
try:
mplugin.registerNode( SPHConfigurationNode.kPluginNodeTypeName, SPHConfigurationNode.kPluginNodeId, SPHConfigurationNode.creator, SPHConfigurationNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerNode( SPHFluidConfigurationNode.kPluginNodeTypeName, SPHFluidConfigurationNode.kPluginNodeId, SPHFluidConfigurationNode.creator, SPHFluidConfigurationNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerNode( SPHFluidNode.kPluginNodeTypeName, SPHFluidNode.kPluginNodeId, SPHFluidNode.creator, SPHFluidNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerNode( SPHRigidBodyNode.kPluginNodeTypeName, SPHRigidBodyNode.kPluginNodeId, SPHRigidBodyNode.creator, SPHRigidBodyNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerCommand(createRectangularEmitterCmd.s_name, createRectangularEmitterCmd.creator)
mplugin.registerCommand(createCircularEmitterCmd.s_name, createCircularEmitterCmd.creator)
mplugin.registerCommand(saveModelCmd.s_name, saveModelCmd.creator)
mplugin.registerCommand(convertToFluidCmd.s_name, convertToFluidCmd.creator)
mplugin.registerCommand(convertToRigidBodiesCmd.s_name, convertToRigidBodiesCmd.creator)
mplugin.registerCommand(createAnimationFieldCmd.s_name, createAnimationFieldCmd.creator, createAnimationFieldCmd.syntaxCreator)
mplugin.registerCommand(loadRigidBodiesCmd.s_name, loadRigidBodiesCmd.creator)
except:
sys.stderr.write( "Failed to register nodes." )
raise
createSPHMenu()
# Uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
deleteSPHMenu()
try:
mplugin.deregisterCommand(createRectangularEmitterCmd.s_name)
mplugin.deregisterCommand(createCircularEmitterCmd.s_name)
mplugin.deregisterCommand(saveModelCmd.s_name)
mplugin.deregisterCommand(convertToFluidCmd.s_name)
mplugin.deregisterCommand(convertToRigidBodiesCmd.s_name)
mplugin.deregisterCommand(createAnimationFieldCmd.s_name)
mplugin.deregisterCommand(loadRigidBodiesCmd.s_name)
mplugin.deregisterNode( SPHRigidBodyNode.kPluginNodeId )
mplugin.deregisterNode( SPHFluidNode.kPluginNodeId )
mplugin.deregisterNode( SPHFluidConfigurationNode.kPluginNodeId )
mplugin.deregisterNode( SPHConfigurationNode.kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node")
raise
| python |
from threading import current_thread
from threading import Thread as _Thread
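# Thread subclass that starts itself on construction, stores the target's
# return value in self.result and returns it from join() once finished.
# Example (sketch):
#     t = Thread(target=sum, args=([1, 2, 3],))
#     result = t.join()  # -> 6 once the worker is done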
class Thread(_Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None):
super().__init__(group, target, name, args, kwargs)
self.done = False
self.result = None
self.start()
def run(self):
try:
if self._target:
self.result = self._target(*self._args, **self._kwargs)
finally:
del self._target, self._args, self._kwargs
self.done = True
def join(self, timeout=None):
if not self._initialized:
raise RuntimeError("Thread.__init__() n t called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
self._wait_for_tstate_lock(timeout=max(timeout, 0))
if self.done:
return self.result
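# Split a sequence into consecutive chunks of at most `size` items, e.g.
# list(_chunker("abcdef", 4)) -> ['abcd', 'ef'].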
def _chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
| python |
"""baseline
Revision ID: bb972e06e6f7
Revises:
Create Date: 2020-01-22 23:03:09.267552
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bb972e06e6f7'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| python |
# coding: utf-8
import sublime, sublime_plugin
import json
import re
import locale
import calendar
import itertools
from datetime import datetime
from datetime import timedelta
NT = sublime.platform() == 'windows'
ST3 = int(sublime.version()) >= 3000
if ST3:
from .APlainTasksCommon import PlainTasksBase, PlainTasksEnabled, PlainTasksFold
MARK_SOON = sublime.DRAW_NO_FILL
MARK_INVALID = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE
else:
from APlainTasksCommon import PlainTasksBase, PlainTasksEnabled, PlainTasksFold
MARK_SOON = MARK_INVALID = 0
sublime_plugin.ViewEventListener = object
try: # unavailable dependencies shall not break basic functionality
from dateutil import parser as dateutil_parser
from dateutil.relativedelta import relativedelta
except:
dateutil_parser = None
if ST3:
locale.setlocale(locale.LC_ALL, '')
def is_yearfirst(date_format):
return date_format.strip('( )').startswith(('%y', '%Y'))
def is_dayfirst(date_format):
return date_format.strip('( )').startswith(('%d'))
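# Interpret a short date string such as "7", "2-1" or "21-2-1 14:30" relative
# to `now`; omitted fields default to the current date/time, and day-only or
# month-day values that already passed roll over to the next month/year.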
def _convert_date(matchstr, now):
match_obj = re.search(r'''(?mxu)
(?:\s*
(?P<yearORmonthORday>\d*(?!:))
(?P<sep>[-\.])?
(?P<monthORday>\d*)
(?P=sep)?
(?P<day>\d*)
(?! \d*:)(?# e.g. '23:' == hour, but '1 23:' == day=1, hour=23)
)?
\s*
(?:
(?P<hour>\d*)
:
(?P<minute>\d*)
)?''', matchstr)
year = now.year
month = now.month
day = int(match_obj.group('day') or 0)
# print(day)
if day:
year = int(match_obj.group('yearORmonthORday'))
month = int(match_obj.group('monthORday'))
else:
day = int(match_obj.group('monthORday') or 0)
# print(day)
if day:
month = int(match_obj.group('yearORmonthORday'))
if month < now.month:
year += 1
else:
day = int(match_obj.group('yearORmonthORday') or 0)
# print(day)
if 0 < day <= now.day:
# expect next month
month += 1
if month == 13:
year += 1
month = 1
elif not day: # @due(0) == today
day = now.day
# else would be day>now, i.e. future day in current month
hour = match_obj.group('hour') or now.hour
minute = match_obj.group('minute') or now.minute
hour, minute = int(hour), int(minute)
if year < 100:
year += 2000
# print(year, month, day, hour, minute)
return year, month, day, hour, minute
def convert_date(matchstr, now):
year = month = day = hour = minute = None
try:
year, month, day, hour, minute = _convert_date(matchstr, now)
date = datetime(year, month, day, hour, minute, 0)
except (ValueError, OverflowError) as e:
return None, (e, year, month, day, hour, minute)
else:
return date, None
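# Handle relative dates such as "+1", "+2d", "+3w" or "+1 12:00"; with "++"
# the offset is applied to the task's @created date instead of `now`.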
def increase_date(view, region, text, now, date_format):
# relative from date of creation if any
if '++' in text:
line = view.line(region)
line_content = view.substr(line)
created = re.search(r'(?mxu)@created\(([\d\w,\.:\-\/ @]*)\)', line_content)
if created:
created_date, error = parse_date(created.group(1),
date_format=date_format,
yearfirst=is_yearfirst(date_format),
dayfirst=is_dayfirst(date_format),
default=now)
if error:
ln = (view.rowcol(line.a)[0] + 1)
print(u'\nPlainTasks:\nError at line %d\n\t%s\ncaused by text:\n\t"%s"\n' % (ln, error, created.group(0)))
sublime.status_message(u'@created date is invalid at line %d, see console for details' % ln)
else:
now = created_date
match_obj = re.search(r'''(?mxu)
\s*\+\+?\s*
(?:
(?P<number>\d*(?![:.]))\s*
(?P<days>[Dd]?)
(?P<weeks>[Ww]?)
(?! \d*[:.])
)?
\s*
(?:
(?P<hour>\d*)
[:.]
(?P<minute>\d*)
)?''', text)
number = int(match_obj.group('number') or 0)
days = match_obj.group('days')
weeks = match_obj.group('weeks')
hour = int(match_obj.group('hour') or 0)
minute = int(match_obj.group('minute') or 0)
if not (number or hour or minute) or (not number and (days or weeks)):
# set 1 if number is omitted, i.e.
# @due(+) == @due(+1) == @due(+1d)
# @due(+w) == @due(+1w)
number = 1
delta = error = None
amount = number * 7 if weeks else number
try:
delta = now + timedelta(days=(amount), hours=hour, minutes=minute)
except (ValueError, OverflowError) as e:
error = e, amount, hour, minute
return delta, error
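# Expand the selection to the enclosing parentheses of the tag and parse the
# text inside as an absolute or relative (leading "+") date.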
def expand_short_date(view, start, end, now, date_format):
while view.substr(start) != '(':
start -= 1
while view.substr(end) != ')':
end += 1
region = sublime.Region(start + 1, end)
text = view.substr(region)
# print(text)
if '+' in text:
date, error = increase_date(view, region, text, now, date_format)
else:
date, error = parse_date(text,
date_format,
yearfirst=is_yearfirst(date_format),
dayfirst=is_dayfirst(date_format),
default=now)
return date, error, sublime.Region(start, end + 1)
def parse_date(date_string, date_format='(%y-%m-%d %H:%M)', yearfirst=True, dayfirst=False, default=None):
'''
Attempt to convert arbitrary string to datetime object
date_string
Unicode
date_format
Unicode
yearfirst
bool
default
datetime object (now)
'''
#print("[date_string]", date_string, "[format] ", date_format)
try:
return datetime.strptime(date_string, date_format), None
except ValueError as e:
# print("[ValueError]:", e)
pass
bare_date_string = date_string.strip('( )')
items = len(bare_date_string.split('-' if '-' in bare_date_string else '.'))
try:
#[HKC] Initially it was < 3, but date_string of "233" will be converted to
# year of 0233, which is silly
if items == 1 and len(bare_date_string) <= 3:
raise Exception("Invalid date_string:", date_string)
if items < 2 and len(bare_date_string) < 3:
# e.g. @due(1) is always first day of next month,
# but dateutil consider it 1st day of current month
raise Exception("Special case of short date: less than 2 numbers")
if items < 3 and any(s in date_string for s in '-.'):
# e.g. @due(2-1) is always February 1st of next year,
# but dateutil consider it this year
raise Exception("Special case of short date: less than 3 numbers")
date = dateutil_parser.parse(bare_date_string,
yearfirst=yearfirst,
dayfirst=dayfirst,
default=default)
#print("[Parsed Date]", date)
if all((date.year < 1900, '%y' in date_format)):
return None, ('format %y requires year >= 1900', date.year, date.month, date.day, date.hour, date.minute)
except Exception as e:
#print("[Exception]:", e, "[date_string]:", date_string)
date, error = convert_date(bare_date_string, default)
else:
error = None
return date, error
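# Render a timedelta either as decimal hours (if the decimal_minutes setting
# is on) or as a "[N days,] H:MM" string with meaningless zero parts stripped.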
def format_delta(view, delta):
delta -= timedelta(microseconds=delta.microseconds)
if view.settings().get('decimal_minutes', False):
days = delta.days
delta = u'%s%s%s%s' % (days or '', ' day, ' if days == 1 else '', ' days, ' if days > 1 else '', '%.2f' % (delta.seconds / 3600.0) if delta.seconds else '')
else:
delta = str(delta)
if delta[~7:] == ' 0:00:00' or delta == '0:00:00': # strip meaningless time
delta = delta[:~6]
elif delta[~2:] == ':00': # strip meaningless seconds
delta = delta[:~2]
return delta.strip(' ,')
class PlainTasksToggleHighlightPastDue(PlainTasksEnabled):
def run(self, edit):
highlight_on = self.view.settings().get('highlight_past_due', True)
self.view.erase_regions('past_due')
self.view.erase_regions('due_soon')
self.view.erase_regions('misformatted')
if not highlight_on:
return
pattern = r'@due(\([^@\n]*\))'
dates_strings = []
dates_regions = self.view.find_all(pattern, 0, '\\1', dates_strings)
if not dates_regions:
if ST3:
self.view.settings().set('plain_tasks_remain_time_phantoms', [])
return
past_due, due_soon, misformatted, phantoms = self.group_due_tags(dates_strings, dates_regions)
scope_past_due = self.view.settings().get('scope_past_due', 'string.other.tag.todo.critical')
scope_due_soon = self.view.settings().get('scope_due_soon', 'string.other.tag.todo.high')
scope_misformatted = self.view.settings().get('scope_misformatted', 'string.other.tag.todo.low')
icon_past_due = self.view.settings().get('icon_past_due', 'circle')
icon_due_soon = self.view.settings().get('icon_due_soon', 'dot')
icon_misformatted = self.view.settings().get('icon_misformatted', '')
self.view.add_regions('past_due', past_due, scope_past_due, icon_past_due)
self.view.add_regions('due_soon', due_soon, scope_due_soon, icon_due_soon, MARK_SOON)
self.view.add_regions('misformatted', misformatted, scope_misformatted, icon_misformatted, MARK_INVALID)
if not ST3:
return
if self.view.settings().get('show_remain_due', False):
self.view.settings().set('plain_tasks_remain_time_phantoms', phantoms)
else:
self.view.settings().set('plain_tasks_remain_time_phantoms', [])
def group_due_tags(self, dates_strings, dates_regions):
past_due, due_soon, misformatted, phantoms = [], [], [], []
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
yearfirst = is_yearfirst(date_format)
now = datetime.now()
default = now - timedelta(seconds=now.second, microseconds=now.microsecond) # for short dates w/o time
due_soon_threshold = self.view.settings().get('highlight_due_soon', 24) * 60 * 60
for i, region in enumerate(dates_regions):
if any(s in self.view.scope_name(region.a) for s in ('completed', 'cancelled')):
continue
text = dates_strings[i]
if '+' in text:
date, error = increase_date(self.view, region, text, default, date_format)
# print(date, date_format)
else:
date, error = parse_date(text,
date_format=date_format,
yearfirst=yearfirst,
dayfirst=is_dayfirst(date_format),
default=default)
# print(date, date_format, yearfirst)
if error:
# print(error)
misformatted.append(region)
else:
if now >= date:
past_due.append(region)
phantoms.append((region.a, '-' + format_delta(self.view, default - date)))
else:
phantoms.append((region.a, format_delta(self.view, date - default)))
if due_soon_threshold:
td = (date - now)
# timedelta.total_seconds() is not available in 2.6.x
time_left = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.0**6
if time_left < due_soon_threshold:
due_soon.append(region)
return past_due, due_soon, misformatted, phantoms
class PlainTasksHLDue(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
view.run_command('plain_tasks_toggle_highlight_past_due')
def on_post_save(self, view):
self.on_activated(view)
def on_load(self, view):
self.on_activated(view)
class PlainTasksFoldToDueTags(PlainTasksFold):
def run(self, edit):
if not self.view.settings().get('highlight_past_due', True):
return sublime.message_dialog('highlight_past_due setting must be true')
self.view.run_command('plain_tasks_toggle_highlight_past_due')
dues = sorted(self.view.line(r) for r in (self.view.get_regions('past_due') + self.view.get_regions('due_soon')))
if not dues:
return sublime.message_dialog('No overdue tasks.\nCongrats!')
self.exec_folding(self.add_projects_and_notes(dues))
class PlainTasksCalculateTotalTimeForProject(PlainTasksEnabled):
def run(self, edit, start):
line = self.view.line(int(start))
total, eol = self.calc_total_time_for_project(line)
if total:
self.view.insert(edit, eol, ' @total(%s)' % format_delta(self.view, total).rstrip(', '))
def calc_total_time_for_project(self, line):
pattern = r'(?<=\s)@(lasted|wasted|total)\([ \t]*(?:(\d+)[ \t]*days?,?)?[ \t]*((?:(\d+)\:(\d+)\:?(\d+)?)|(?:(\d+)\.(\d+)))?[ \t]*\)'
format = '{"days": "\\2", "hours": "\\4", "minutes": "\\5", "seconds": "\\6", "dhours": "\\7", "dminutes": "\\8"}'
lasted_strings = []
lasted_regions = self.view.find_all(pattern, 0, format, lasted_strings)
if not lasted_regions:
return 0, 0
eol = line.end()
project_block = self.view.indented_region(eol + 1)
total = timedelta()
for i, region in enumerate(lasted_regions):
if not all((region > line, region.b <= project_block.b)):
continue
t = json.loads(lasted_strings[i].replace('""', '"0"'))
total += timedelta(days=int(t['days']),
hours=int(t['hours']) or int(t['dhours']),
minutes=int(t['minutes']) or int(t['dminutes']) * 60,
seconds=int(t['seconds']))
return total, eol
class PlainTasksCalculateTimeForTask(PlainTasksEnabled):
def run(self, edit, started_matches, toggle_matches, now, eol, tag='lasted'):
'''
started_matches
list of Unicode objects
toggle_matches
list of Unicode objects
now
Unicode object, moment of completion or cancellation of a task
eol
int as str (abs. point of end of task line without line break)
tag
Unicode object (lasted for complete, wasted for cancelled)
'''
if not started_matches:
return
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
start = datetime.strptime(started_matches[0], date_format)
end = datetime.strptime(now, date_format)
toggle_times = [datetime.strptime(toggle, date_format) for toggle in toggle_matches]
all_times = [start] + toggle_times + [end]
pairs = zip(all_times[::2], all_times[1::2])
deltas = [pair[1] - pair[0] for pair in pairs]
delta = format_delta(self.view, sum(deltas, timedelta()))
tag = ' @%s(%s)' % (tag, delta.rstrip(', ') if delta else ('a bit' if '%H' in date_format else 'less than day'))
eol = int(eol)
if self.view.substr(sublime.Region(eol - 2, eol)) == ' ':
eol -= 2 # keep double whitespace at eol
self.view.insert(edit, eol, tag)
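# Illustrative sketch (not part of the plugin): how @started/@toggle/@done timestamps are paired
# into intervals and summed, using made-up times and the default date format assumed above.
#
#   from datetime import datetime, timedelta
#   fmt = '(%y-%m-%d %H:%M)'
#   times = [datetime.strptime(t, fmt) for t in
#            ['(21-01-01 10:00)', '(21-01-01 10:30)', '(21-01-01 11:00)', '(21-01-01 11:15)']]
#   pairs = zip(times[::2], times[1::2])                  # (start, pause), (resume, done)
#   total = sum((b - a for a, b in pairs), timedelta())   # -> 0:45:00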
class PlainTasksReCalculateTimeForTasks(PlainTasksEnabled):
def run(self, edit):
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
calculated = r'([ \t]@[lw]asted\([\d\w,\.:\-\/ @]*\))'
done = r'^\s*[^\b]*?\s*@(done|cancell?ed)[ \t]*(\([\d\w,\.:\-\/ @]*\)).*$'
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
default_now = datetime.now().strftime(date_format)
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
current_scope = self.view.scope_name(line.a)
if not any(s in current_scope for s in ('completed', 'cancelled')):
continue
line_contents = self.view.substr(line)
done_match = re.match(done, line_contents, re.U)
now = done_match.group(2) if done_match else default_now
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
calc_matches = re.findall(calculated, line_contents, re.U)
for match in calc_matches:
line_contents = line_contents.replace(match, '')
self.view.replace(edit, line, line_contents)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.begin() + len(line_contents),
'tag': 'lasted' if 'completed' in current_scope else 'wasted'}
)
class PlainTaskInsertDate(PlainTasksBase):
def runCommand(self, edit, region=None, date=None):
if region:
y, m, d, H, M = date
region = sublime.Region(*region)
self.view.replace(edit, region, datetime(y, m, d, H, M, 0).strftime(self.date_format) + ' ')
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.view.line(region).b))
return
for s in reversed(list(self.view.sel())):
self.view.insert(edit, s.b, datetime.now().strftime(self.date_format))
class PlainTasksReplaceShortDate(PlainTasksBase):
def runCommand(self, edit):
s = self.view.sel()[0]
date, error, region = expand_short_date(self.view, s.a, s.b, datetime.now(), self.date_format)
if not date:
sublime.error_message(
'PlainTasks:\n\n'
'{0}:\n days:\t{1}\n hours:\t{2}\n minutes:\t{3}\n'.format(*error) if len(error) == 4 else
'{0}:\n year:\t{1}\n month:\t{2}\n day:\t{3}\n HH:\t{4}\n MM:\t{5}\n'.format(*error))
return
date = date.strftime(self.date_format)
self.view.replace(edit, region, date)
offset = region.a + len(date)
self.view.sel().clear()
self.view.sel().add(sublime.Region(offset, offset))
class PlainTasksViewEventListener(sublime_plugin.ViewEventListener):
@classmethod
def is_applicable(cls, settings):
return settings.get('syntax') in ('Packages/PlainTasks/PlainTasks.sublime-syntax', 'Packages/PlainTasks/PlainTasks.tmLanguage')
class PlainTasksPreviewShortDate(PlainTasksViewEventListener):
def __init__(self, view):
self.view = view
self.phantoms = sublime.PhantomSet(view, 'plain_tasks_preview_short_date')
def on_selection_modified_async(self):
self.phantoms.update([]) # https://github.com/SublimeTextIssues/Core/issues/1497
s = self.view.sel()[0]
if not (s.empty() and 'meta.tag.todo' in self.view.scope_name(s.a)):
return
rgn = self.view.extract_scope(s.a)
text = self.view.substr(rgn)
match = re.match(r'@due\(([^@\n]*)\)[\s$]*', text)
# print(s, rgn, text)
if not match:
return
# print(match.group(1))
preview_offset = self.view.settings().get('due_preview_offset', 0)
remain_format = self.view.settings().get('due_remain_format', '{time} remaining')
overdue_format = self.view.settings().get('due_overdue_format', '{time} overdue')
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
start = rgn.a + 5 # within parenthesis
now = datetime.now().replace(second=0, microsecond=0)
date, error, region = expand_short_date(self.view, start, start, now, date_format)
upd = []
if not error:
if now >= date:
delta = '-' + format_delta(self.view, now - date)
else:
delta = format_delta(self.view, date - now)
content = (overdue_format if '-' in delta else remain_format).format(time=delta.lstrip('-') or 'a little bit')
if content:
if self.view.settings().get('show_remain_due', False):
# replace existing remain/overdue phantom
phantoms = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
for index, (point, _) in enumerate(phantoms):
if point == region.a - 4:
phantoms[index] = [point, str(delta)]
self.view.settings().set('plain_tasks_remain_time_phantoms', phantoms)
break
else:
upd.append(sublime.Phantom(
sublime.Region(region.a - 4),
content,
sublime.LAYOUT_BELOW))
date = date.strftime(date_format).strip('()')
if date == match.group(1).strip():
self.phantoms.update(upd)
return
upd.append(sublime.Phantom(
sublime.Region(region.b - preview_offset),
date or (
'{0}:<br> days:\t{1}<br> hours:\t{2}<br> minutes:\t{3}<br>'.format(*error) if len(error) == 4 else
'{0}:<br> year:\t{1}<br> month:\t{2}<br> day:\t{3}<br> HH:\t{4}<br> MM:\t{5}<br>'.format(*error)),
sublime.LAYOUT_INLINE))
self.phantoms.update(upd)
class PlainTasksChooseDate(sublime_plugin.ViewEventListener):
def __init__(self, view):
self.view = view
@classmethod
def is_applicable(cls, settings):
return settings.get('show_calendar_on_tags')
def on_selection_modified_async(self):
s = self.view.sel()[0]
if not (s.empty() and any('meta.tag.todo ' in self.view.scope_name(n) for n in (s.a, s.a - 1))):
return
self.view.run_command('plain_tasks_calendar', {'point': s.a})
class PlainTasksCalendar(sublime_plugin.TextCommand):
def is_visible(self):
return ST3
def run(self, edit, point=None):
point = point or self.view.sel()[0].a
self.region, tag = self.extract_tag(point)
content = self.generate_calendar()
self.view.show_popup(content, sublime.COOPERATE_WITH_AUTO_COMPLETE, self.region.a, 555, 555, self.action)
def extract_tag(self, point):
'''point is cursor
Return tuple of two elements
Region
which will be replaced with chosen date, it may be parentheses belong to tag, or end of tag, or point
Unicode
tag under cursor (i.e. point)
'''
start = end = point
tag_pattern = r'(?<=\s)(\@[^\(\) ,\.]+)([\w\d\.\(\)\-!? :\+]*)'
line = self.view.line(point)
matches = re.finditer(tag_pattern, self.view.substr(line))
for match in matches:
m_start = line.a + match.start(1)
m_end = line.a + match.end(2)
if m_start <= point <= m_end:
start = line.a + match.start(2)
end = m_end
break
else:
match = None
tag = match.group(0) if match else ''
return sublime.Region(start, end), tag
def generate_calendar(self, date=None):
date = date or datetime.now()
y, m, d, H, M = date.year, date.month, date.day, date.hour, date.minute
content = ('<style> #today {{color: var(--background); background-color: var(--foreground)}}</style>'
'<br> <center><big>{prev_month} {next_month} {month}'
' {prev_year} {next_year} {year}</big></center><br><br>'
'{table}<br> {time}<br><br><hr>'
'<br> Click day to insert date '
'<br> into view, click month or '
'<br> time to switch the picker <br><br>'
)
locale.setlocale(locale.LC_ALL, '') # to get native month name
month = '<a href="month:{0}-{1}-{2}-{3}-{4}">{5}</a>'.format(y, m, d, H, M, date.strftime('%B'))
prev_month = '<a href="prev_month:{0}-{1}-{2}-{3}-{4}">←</a>'.format(y, m, d, H, M)
next_month = '<a href="next_month:{0}-{1}-{2}-{3}-{4}">→</a>'.format(y, m, d, H, M)
prev_year = '<a href="prev_year:{0}-{1}-{2}-{3}-{4}">←</a>'.format(y, m, d, H, M)
next_year = '<a href="next_year:{0}-{1}-{2}-{3}-{4}">→</a>'.format(y, m, d, H, M)
year = '<a href="year:{0}-{1}-{2}-{3}-{4}">{0}</a>'.format(y, m, d, H, M)
table = ''
for week in calendar.Calendar().monthdayscalendar(y, m):
row = ['']
for day in week:
link = '<a href="day:{0}-{1}-{2}-{3}-{4}"{5}>{2}</a>'.format(y, m, day, H, M, ' id="today"' if d == day else '')
cell = (' %s' % link if day < 10 else ' %s' % link) if day else ' '
row.append(cell)
table += ' '.join(row + ['<br><br>'])
time = '<a href="time:{0}-{1}-{2}-{3}-{4}">{5}</a>'.format(y, m, d, H, M, date.strftime('%H:%M'))
return content.format(
prev_month=prev_month, next_month=next_month, month=month,
prev_year=prev_year, next_year=next_year, year=year,
time=time, table=table)
def action(self, payload):
msg, stamp = payload.split(':')
def insert(stamp):
self.view.hide_popup()
y, m, d, H, M = (int(i) for i in stamp.split('-'))
self.view.run_command('plain_task_insert_date', {'region': (self.region.a, self.region.b), 'date': (y, m, d, H, M)})
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.region.b + 1))
def generate_months(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
months = ['<br>{5}<a href="year:{0}-{1}-{2}-{3}-{4}">{0}</a><br><br>'.format(y, m, d, H, M, ' ' * 8)]
for i in range(1, 13):
months.append('{6}<a href="calendar:{0}-{1}-{2}-{3}-{4}">{5}</a> '.format(y, i, d, H, M, datetime(y, i, 1, H, M, 0).strftime('%b'), '•' if i == m else ' '))
if i in (4, 8, 12):
months.append('<br><br>')
self.view.update_popup(''.join(months))
def generate_years(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
years = ['<br>']
for i in range(y - 6, y + 6):
years.append('{5}<a href="month:{0}-{1}-{2}-{3}-{4}">{0}</a> '.format(i, m, d, H, M, '•' if i == y else ' '))
if i in (y - 3, y + 1, y + 5):
years.append('<br><br>')
self.view.update_popup(''.join(years))
def generate_time(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
hours = ['<br> Hours:<br><br>']
for i in range(24):
hours.append('{6}{5}<a href="time:{0}-{1}-{2}-{3}-{4}">{3}</a> '.format(y, m, d, i, M, '•' if i == H else ' ', ' ' if i < 10 else ''))
if i in (7, 15, 23):
hours.append('<br><br>')
minutes = ['<br> Minutes:<br><br>']
for i in range(60):
minutes.append('{6}{5}<a href="time:{0}-{1}-{2}-{3}-{4}">{4}</a> '.format(y, m, d, H, i, '•' if i == M else ' ', ' ' if i < 10 else ''))
if i in (9, 19, 29, 39, 49, 59):
minutes.append('<br><br>')
confirm = ['<br> <a href="calendar:{0}-{1}-{2}-{3}-{4}">Confirm: {5}</a> <br><br>'.format(y, m, d, H, M, datetime(y, m, d, H, M, 0).strftime('%H:%M'))]
self.view.update_popup(''.join(hours + minutes + confirm))
def calendar(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
if m == 2 and d > 28:
d = 28
elif d == 31 and m in (4, 6, 9, 11):
d = 30
self.view.update_popup(self.generate_calendar(date=datetime(y, m, d, H, M, 0)))
def shift(stamp, month=0, year=0):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
date = datetime(y, m, d, H, M, 0) + relativedelta(months=month, years=year)
self.view.update_popup(self.generate_calendar(date))
case = {
'day': insert,
'month': generate_months,
'year': generate_years,
'time': generate_time,
'calendar': calendar,
'prev_month': lambda s=stamp: shift(s, month=-1),
'next_month': lambda s=stamp: shift(s, month=1),
'prev_year': lambda s=stamp: shift(s, year=-1),
'next_year': lambda s=stamp: shift(s, year=1)
}
self.view.update_popup('Loading...')
case[msg](stamp)
class PlainTasksRemain(PlainTasksViewEventListener):
def __init__(self, view):
self.view = view
self.phantom_set = sublime.PhantomSet(view, 'plain_tasks_remain_time')
self.view.settings().add_on_change('plain_tasks_remain_time_phantoms', self.check_setting)
self.phantoms = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
def check_setting(self):
'''add_on_change is issued on change of any setting in settings object'''
new_value = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
if self.phantoms == new_value:
return
self.phantoms = new_value
self.update()
def update(self):
self.phantoms = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
if not self.phantoms:
self.phantom_set.update([])
return
remain_format = self.view.settings().get('due_remain_format', '{time} remaining')
overdue_format = self.view.settings().get('due_overdue_format', '{time} overdue')
upd = []
for point, content in self.phantoms:
upd.append(sublime.Phantom(
sublime.Region(point),
(overdue_format if '-' in content else remain_format).format(time=content.lstrip('-') or 'a little bit'),
sublime.LAYOUT_BELOW))
self.phantom_set.update(upd)
def plugin_unloaded():
for window in sublime.windows():
for view in window.views():
view.settings().clear_on_change('plain_tasks_remain_time_phantoms')
| python |
import json
from src import util
from threading import Thread
with open('infos/accounts.json') as f:
    accounts = json.load(f)
with open('infos/config.json') as f:
    config = json.load(f)
with open('infos/usernames.txt', 'r') as f:
    usernames = [line.strip() for line in f]
usernamesForAccount = config["usernamesForAccount"]
capacity = len(accounts) * usernamesForAccount
toSent = len(usernames)
if capacity < toSent:
    print('Problem: we have ' + str(len(accounts)) + ' accounts')
    print('Problem: we can message at most ' + str(len(accounts) * usernamesForAccount) + ' usernames')
    print('Problem: we want to message ' + str(len(usernames)) + ' usernames')
    print('Problem: ' + str(
        len(usernames) - (len(accounts) * usernamesForAccount)) + ' usernames would be left without a message')
exit()
buttons = []
threads = []
timewait = 5
for account in accounts:
timewait += 5
if not account.get('password'):
account['password'] = config["defaultAccountPassword"]
if not usernames:
break
usernamesForAccountList = list()
for i in range(usernamesForAccount):
if not usernames:
break
usernamesForAccountList.append(usernames.pop())
# util.send_messages(account, usernamesForAccountList)
# util.send_groupmessages(account, usernamesForAccountList)
t = Thread(target=util.send_messages,
args=(account, usernamesForAccountList, timewait,)) # get number for place in list `buttons`
threads.append(t)
buttons.append(False) # create place
for t in threads:
print(t.name)
t.start()
for t in threads:
print(t.name)
t.join()
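# Illustrative split (made-up numbers): with usernamesForAccount = 3 and
# usernames = ['a', 'b', 'c', 'd', 'e'], the first account gets ['e', 'd', 'c'] (popped from the
# end of the list) and the second gets ['b', 'a']; each account's batch is sent in its own thread,
# with timewait growing by 5 seconds per account to stagger the starts.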
| python |
from collections import defaultdict
from datetime import timedelta
from django.contrib.sites.models import Site
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import signals
from django.utils import timezone
from cms.models import CMSPlugin
from cms.utils import get_language_from_request
from .compat import CMS_GTE_36
from .utils import get_plugin_fields, get_plugin_model
def delete_plugins(placeholder, plugin_ids, nested=True):
# With plugins, we can't do queryset.delete()
# because this would trigger a bunch of internal
# cms signals.
# Instead, delete each plugin individually and turn off
# position reordering using the _no_reorder trick.
plugins = (
placeholder
.cmsplugin_set
.filter(pk__in=plugin_ids)
.order_by('-depth')
.select_related()
)
bound_plugins = get_bound_plugins(plugins)
for plugin in bound_plugins:
plugin._no_reorder = True
if hasattr(plugin, 'cmsplugin_ptr'):
plugin.cmsplugin_ptr._no_reorder = True
# When the nested option is False
# avoid queries by preventing the cms from
# recalculating the child counter of this plugin's
# parent (for which there's none).
plugin.delete(no_mp=not nested)
def get_bound_plugins(plugins):
plugin_types_map = defaultdict(list)
plugin_lookup = {}
# make a map of plugin types, needed later for downcasting
for plugin in plugins:
plugin_types_map[plugin.plugin_type].append(plugin.pk)
for plugin_type, pks in plugin_types_map.items():
plugin_model = get_plugin_model(plugin_type)
plugin_queryset = plugin_model.objects.filter(pk__in=pks)
# put them in a map so we can replace the base CMSPlugins with their
# downcasted versions
for instance in plugin_queryset.iterator():
plugin_lookup[instance.pk] = instance
for plugin in plugins:
yield plugin_lookup.get(plugin.pk, plugin)
def get_plugin_data(plugin, only_meta=False):
if only_meta:
custom_data = None
else:
plugin_fields = get_plugin_fields(plugin.plugin_type)
_plugin_data = serializers.serialize('python', (plugin,), fields=plugin_fields)[0]
custom_data = _plugin_data['fields']
plugin_data = {
'pk': plugin.pk,
'creation_date': plugin.creation_date,
'position': plugin.position,
'plugin_type': plugin.plugin_type,
'parent_id': plugin.parent_id,
'data': custom_data,
}
return plugin_data
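# Illustrative shape of the returned dict (values are made up; "data" holds the serialized
# custom plugin fields, or None when only_meta=True):
#
#   {'pk': 42, 'creation_date': datetime(2020, 1, 1, 12, 0), 'position': 0,
#    'plugin_type': 'TextPlugin', 'parent_id': None, 'data': {'body': '<p>Hello</p>'}}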
def get_active_operation(operations):
operations = operations.filter(is_applied=True)
try:
operation = operations.latest()
except ObjectDoesNotExist:
operation = None
return operation
def get_inactive_operation(operations, active_operation=None):
active_operation = active_operation or get_active_operation(operations)
if active_operation:
date_created = active_operation.date_created
operations = operations.filter(date_created__gt=date_created)
try:
operation = operations.filter(is_applied=False).earliest()
except ObjectDoesNotExist:
operation = None
return operation
def get_operations_from_request(request, path=None, language=None):
from .models import PlaceholderOperation
if not language:
        language = get_language_from_request(request)
origin = path or request.path
# This is controversial :/
# By design, we don't let undo/redo span longer than a day.
# To be decided if/how this should be configurable.
date = timezone.now() - timedelta(days=1)
site = Site.objects.get_current(request)
queryset = PlaceholderOperation.objects.filter(
site=site,
origin=origin,
language=language,
user=request.user,
user_session_key=request.session.session_key,
date_created__gt=date,
is_archived=False,
)
return queryset
def disable_cms_plugin_signals(func):
# Skip this if we are using django CMS >= 3.6
if CMS_GTE_36:
return func
from cms.signals import (
post_delete_plugins, pre_delete_plugins, pre_save_plugins,
)
# The wrapped function NEEDS to set _no_reorder on any bound plugin instance
# otherwise this does nothing because it only disconnects signals
# for the cms.CMSPlugin class, not its subclasses
plugin_signals = (
(signals.pre_delete, pre_delete_plugins, 'cms_pre_delete_plugin', CMSPlugin),
(signals.pre_save, pre_save_plugins, 'cms_pre_save_plugin', CMSPlugin),
(signals.post_delete, post_delete_plugins, 'cms_post_delete_plugin', CMSPlugin),
)
def wrapper(*args, **kwargs):
for signal, handler, dispatch_id, model_class in plugin_signals:
signal.disconnect(
handler,
sender=model_class,
dispatch_uid=dispatch_id
)
signal.disconnect(handler, sender=model_class)
func(*args, **kwargs)
for signal, handler, dispatch_id, model_class in plugin_signals:
signal.connect(
handler,
sender=model_class,
dispatch_uid=dispatch_id
)
return wrapper
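# Hypothetical usage sketch (not part of this module): wrapping a bulk-delete helper so the
# cms.CMSPlugin signal handlers stay disconnected while it runs; `placeholder` and the ids
# are assumed to exist in the caller's context.
#
#   @disable_cms_plugin_signals
#   def bulk_delete(placeholder, plugin_ids):
#       delete_plugins(placeholder, plugin_ids, nested=False)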
| python |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Timer
from copy import deepcopy
from typing import Optional
from deeppavlov.agents.default_agent.default_agent import DefaultAgent
from deeppavlov.core.agent.rich_content import RichMessage
from deeppavlov.core.common.log import get_logger
log = get_logger(__name__)
class Conversation:
"""Contains agent (if multi-instanced), receives requests, generates responses.
Args:
config: Alexa skill configuration settings.
agent: DeepPavlov Agent instance.
conversation_key: Alexa conversation ID.
self_destruct_callback: Conversation instance deletion callback function.
Attributes:
config: Alexa skill configuration settings.
agent: Alexa skill agent.
key: Alexa conversation ID.
stateful: Stateful mode flag.
timer: Conversation self-destruct timer.
handled_requests: Mapping of Alexa requests types to requests handlers.
response_template: Alexa response template.
"""
def __init__(self, config: dict, agent: DefaultAgent, conversation_key: str,
self_destruct_callback: callable) -> None:
self.config = config
self.agent = agent
self.key = conversation_key
self.self_destruct_callback = self_destruct_callback
self.stateful: bool = self.config['stateful']
self.timer: Optional[Timer] = None
self.handled_requests = {
'LaunchRequest': self._handle_launch,
'IntentRequest': self._handle_intent,
'SessionEndedRequest': self._handle_end,
'_unsupported': self._handle_unsupported
}
self.response_template = {
'version': '1.0',
'sessionAttributes': {
'sessionId': None
}
}
self._start_timer()
def _start_timer(self) -> None:
"""Initiates self-destruct timer."""
self.timer = Timer(self.config['conversation_lifetime'], self.self_destruct_callback)
self.timer.start()
def _rearm_self_destruct(self) -> None:
"""Rearms self-destruct timer."""
self.timer.cancel()
self._start_timer()
def handle_request(self, request: dict) -> dict:
"""Routes Alexa requests to appropriate handlers.
Args:
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
request_type = request['request']['type']
request_id = request['request']['requestId']
log.debug(f'Received request. Type: {request_type}, id: {request_id}')
if request_type in self.handled_requests.keys():
response: dict = self.handled_requests[request_type](request)
else:
response: dict = self.handled_requests['_unsupported'](request)
log.warning(f'Unsupported request type: {request_type}, request id: {request_id}')
self._rearm_self_destruct()
return response
def _act(self, utterance: str) -> list:
"""Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
"""
if self.stateful:
utterance = [[utterance], [self.key]]
else:
utterance = [[utterance]]
agent_response: list = self.agent(*utterance)
return agent_response
def _generate_response(self, response: dict, request: dict) -> dict:
"""Populates generated response with additional data conforming Alexa response specification.
Args:
response: Raw user input extracted from Alexa request.
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
response_template = deepcopy(self.response_template)
response_template['sessionAttributes']['sessionId'] = request['session']['sessionId']
for key, value in response_template.items():
if key not in response.keys():
response[key] = value
return response
def _handle_intent(self, request: dict) -> dict:
"""Handles IntentRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
intent_name = self.config['intent_name']
slot_name = self.config['slot_name']
request_id = request['request']['requestId']
request_intent: dict = request['request']['intent']
if intent_name != request_intent['name']:
log.error(f"Wrong intent name received: {request_intent['name']} in request {request_id}")
return {'error': 'wrong intent name'}
if slot_name not in request_intent['slots'].keys():
log.error(f'No slot named {slot_name} found in request {request_id}')
return {'error': 'no slot found'}
utterance = request_intent['slots'][slot_name]['value']
agent_response = self._act(utterance)
if not agent_response:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
prediction: RichMessage = agent_response[0]
prediction: list = prediction.alexa()
if not prediction:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
response = self._generate_response(prediction[0], request)
return response
def _handle_launch(self, request: dict) -> dict:
"""Handles LaunchRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['start_message']
},
'card': {
'type': 'Simple',
'content': self.config['start_message']
}
}
}
response = self._generate_response(response, request)
return response
def _handle_end(self, request: dict) -> dict:
"""Handles SessionEndedRequest Alexa request and deletes Conversation instance.
Args:
request: Alexa request.
Returns:
response: Dummy empty response dict.
"""
response = {}
self.self_destruct_callback()
return response
def _handle_unsupported(self, request: dict) -> dict:
"""Handles all unsupported types of Alexa requests. Returns standard message.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['unsupported_message']
},
'card': {
'type': 'Simple',
'content': self.config['unsupported_message']
}
}
}
response = self._generate_response(response, request)
return response
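# Minimal usage sketch (assumptions for illustration only): the config keys mirror the ones read
# by this class, `agent` must be a real DefaultAgent and `alexa_request` a real Alexa payload.
#
#   config = {'stateful': False, 'conversation_lifetime': 300, 'intent_name': 'AskDeepPavlov',
#             'slot_name': 'raw_input', 'start_message': 'Hi!', 'unsupported_message': 'Sorry?'}
#   conversation = Conversation(config, agent, 'amzn1.echo-api.session.x', lambda: None)
#   response = conversation.handle_request(alexa_request)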
| python |
"""Show the development of one optimization's criterion and parameters over time."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
from bokeh.layouts import Column
from bokeh.layouts import Row
from bokeh.models import ColumnDataSource
from bokeh.models import Panel
from bokeh.models import Tabs
from bokeh.models import Toggle
from estimagic.dashboard.monitoring_callbacks import activation_callback
from estimagic.dashboard.monitoring_callbacks import logscale_callback
from estimagic.dashboard.plot_functions import plot_time_series
from estimagic.logging.database_utilities import load_database
from estimagic.logging.database_utilities import read_last_rows
from estimagic.logging.read_log import read_start_params
from jinja2 import Environment
from jinja2 import FileSystemLoader
def monitoring_app(
doc,
database_name,
session_data,
updating_options,
start_immediately,
):
"""Create plots showing the development of the criterion and parameters.
Args:
doc (bokeh.Document): Argument required by bokeh.
database_name (str): Short and unique name of the database.
session_data (dict): Infos to be passed between and within apps.
Keys of this app's entry are:
- last_retrieved (int): last iteration currently in the ColumnDataSource.
- database_path (str or pathlib.Path)
- callbacks (dict): dictionary to be populated with callbacks.
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
"""
# style the Document
template_folder = Path(__file__).resolve().parent
# conversion to string from pathlib Path is necessary for FileSystemLoader
env = Environment(loader=FileSystemLoader(str(template_folder)))
doc.template = env.get_template("index.html")
# process inputs
database = load_database(path=session_data["database_path"])
start_point = _calculate_start_point(database, updating_options)
session_data["last_retrieved"] = start_point
start_params = read_start_params(path_or_database=database)
start_params["id"] = _create_id_column(start_params)
group_to_param_ids = _map_group_to_other_column(start_params, "id")
group_to_param_names = _map_group_to_other_column(start_params, "name")
criterion_history, params_history = _create_cds_for_monitoring_app(
group_to_param_ids
)
# create elements
button_row = _create_button_row(
doc=doc,
database=database,
session_data=session_data,
start_params=start_params,
updating_options=updating_options,
)
monitoring_plots = _create_initial_convergence_plots(
criterion_history=criterion_history,
params_history=params_history,
group_to_param_ids=group_to_param_ids,
group_to_param_names=group_to_param_names,
)
# add elements to bokeh Document
grid = Column(children=[button_row, *monitoring_plots], sizing_mode="stretch_width")
convergence_tab = Panel(child=grid, title="Convergence Tab")
tabs = Tabs(tabs=[convergence_tab])
doc.add_root(tabs)
if start_immediately:
activation_button = doc.get_model_by_name("activation_button")
activation_button.active = True
def _create_id_column(df):
"""Create a column that gives the position for plotted parameters and is None else.
Args:
df (pd.DataFrame)
Returns:
ids (pd.Series): integer position in the DataFrame unless the group was
None, False, np.nan or an empty string.
"""
ids = pd.Series(range(len(df)), dtype=object, index=df.index)
ids[df["group"].isin([None, False, np.nan, ""])] = None
return ids.astype(str)
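# Worked example (hypothetical data): for rows with groups ["a", None, "a"] the result is
# ["0", "None", "2"] -- the integer position as a string, with excluded rows mapped to "None".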
def _map_group_to_other_column(params, column_name):
"""Map the group name to lists of one column's values of the group's parameters.
Args:
params (pd.DataFrame): Includes the "group" and "id" columns.
column_name (str): name of the column for which to return the parameter values.
Returns:
group_to_values (dict): Keys are the values of the "group" column.
The values are lists of parameter values of the parameters belonging
to the particular group.
"""
to_plot = params[~params["group"].isin([None, False, np.nan, ""])]
group_to_indices = to_plot.groupby("group").groups
group_to_values = {}
for group, loc in group_to_indices.items():
group_to_values[group] = to_plot[column_name].loc[loc].tolist()
return group_to_values
def _create_cds_for_monitoring_app(group_to_param_ids):
"""Create the ColumnDataSources for saving the criterion and parameter values.
They will be periodically updated from the database.
There is a ColumnDataSource for all parameters and one for the criterion value.
The "x" column is called "iteration".
Args:
group_to_param_ids (dict): Keys are the groups to be plotted. The values are
the ids of the parameters belonging to the particular group.
Returns:
criterion_history (bokeh.ColumnDataSource)
params_history (bokeh.ColumnDataSource)
"""
crit_data = {"iteration": [], "criterion": []}
criterion_history = ColumnDataSource(crit_data, name="criterion_history_cds")
param_ids = []
for id_list in group_to_param_ids.values():
param_ids += id_list
params_data = {id_: [] for id_ in param_ids + ["iteration"]}
params_history = ColumnDataSource(params_data, name="params_history_cds")
return criterion_history, params_history
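# Illustrative initial contents (hypothetical ids): with group_to_param_ids = {"g": ["0", "1"]},
# criterion_history starts as {"iteration": [], "criterion": []} and
# params_history as {"0": [], "1": [], "iteration": []}.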
def _calculate_start_point(database, updating_options):
"""Calculate the starting point.
Args:
database (sqlalchemy.MetaData): Bound metadata object.
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
Returns:
start_point (int): iteration from which to start the dashboard.
"""
if updating_options["jump"]:
last_entry = read_last_rows(
database=database,
table_name="optimization_iterations",
n_rows=1,
return_type="list_of_dicts",
)
nr_of_entries = last_entry[0]["rowid"]
nr_to_go_back = updating_options["rollover"] * updating_options["stride"]
start_point = max(0, nr_of_entries - nr_to_go_back)
else:
start_point = 0
return start_point
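# Worked example (made-up numbers): with jump=True, rollover=500, stride=2 and 10000 rows in
# optimization_iterations, nr_to_go_back = 500 * 2 = 1000 and start_point = max(0, 10000 - 1000) = 9000.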
def _create_initial_convergence_plots(
criterion_history,
params_history,
group_to_param_ids,
group_to_param_names,
):
"""Create the initial convergence plots.
Args:
criterion_history (bokeh ColumnDataSource)
params_history (bokeh ColumnDataSource)
group_to_param_ids (dict): Keys are the groups to be plotted. Values are the
ids of the parameters belonging to the respective group.
group_to_param_names (dict): Keys are the groups to be plotted. Values are the
names of the parameters belonging to the respective group.
Returns:
convergence_plots (list): List of bokeh Row elements, each containing one
convergence plot.
"""
param_plots = []
for group, param_ids in group_to_param_ids.items():
param_names = group_to_param_names[group]
param_group_plot = plot_time_series(
data=params_history,
y_keys=param_ids,
y_names=param_names,
x_name="iteration",
title=str(group),
)
param_plots.append(param_group_plot)
arranged_param_plots = [Row(plot) for plot in param_plots]
linear_criterion_plot = plot_time_series(
data=criterion_history,
x_name="iteration",
y_keys=["criterion"],
y_names=["criterion"],
title="Criterion",
name="linear_criterion_plot",
logscale=False,
)
log_criterion_plot = plot_time_series(
data=criterion_history,
x_name="iteration",
y_keys=["criterion"],
y_names=["criterion"],
title="Criterion",
name="log_criterion_plot",
logscale=True,
)
log_criterion_plot.visible = False
plot_list = [
Row(linear_criterion_plot),
Row(log_criterion_plot),
] + arranged_param_plots
return plot_list
def _create_button_row(
doc,
database,
session_data,
start_params,
updating_options,
):
"""Create a row with two buttons, one for (re)starting and one for scale switching.
Args:
doc (bokeh.Document)
database (sqlalchemy.MetaData): Bound metadata object.
session_data (dict): dictionary with the last retrieved rowid
start_params (pd.DataFrame): See :ref:`params`
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
Returns:
bokeh.layouts.Row
"""
# (Re)start convergence plot button
activation_button = Toggle(
active=False,
label="Start Updating",
button_type="danger",
width=200,
height=30,
name="activation_button",
)
partialed_activation_callback = partial(
activation_callback,
button=activation_button,
doc=doc,
database=database,
session_data=session_data,
tables=["criterion_history", "params_history"],
start_params=start_params,
updating_options=updating_options,
)
activation_button.on_change("active", partialed_activation_callback)
# switch between linear and logscale button
logscale_button = Toggle(
active=False,
label="Show criterion plot on a logarithmic scale",
button_type="default",
width=200,
height=30,
name="logscale_button",
)
partialed_logscale_callback = partial(
logscale_callback,
button=logscale_button,
doc=doc,
)
logscale_button.on_change("active", partialed_logscale_callback)
button_row = Row(children=[activation_button, logscale_button], name="button_row")
return button_row
| python |
import sys
import re
def check_url(url):
    patt = r'^(\w+)://([0-9a-z.]+)(:\d+)?(?:/([0-9a-z_/.]+)?(\S+)?)?$'
m = re.match(patt, url, re.I)
if m:
schema = m.group(1)
port = m.group(3)
if port is None and schema == 'http':
port = 80
return {'schema': schema, 'hostname': m.group(2), 'port': port, 'path': m.group(4), 'qs': m.group(5)}
else:
return None
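# Example of the returned mapping (illustrative):
#   check_url('http://example.com/index.html?q=1')
#   -> {'schema': 'http', 'hostname': 'example.com', 'port': 80, 'path': 'index.html', 'qs': '?q=1'}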
if __name__ == '__main__':
print(check_url(sys.argv[1])) | python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CognitiveServicesAccountCreateParameters(Model):
"""The parameters to provide for the account.
All required parameters must be populated in order to send to Azure.
:param sku: Required. Required. Gets or sets the SKU of the resource.
:type sku: ~azure.mgmt.cognitiveservices.models.Sku
:param kind: Required. Required. Gets or sets the Kind of the resource.
Possible values include: 'Bing.Autosuggest.v7', 'Bing.CustomSearch',
'Bing.Search.v7', 'Bing.Speech', 'Bing.SpellCheck.v7', 'ComputerVision',
'ContentModerator', 'CustomSpeech', 'CustomVision.Prediction',
'CustomVision.Training', 'Emotion', 'Face', 'LUIS', 'QnAMaker',
'SpeakerRecognition', 'SpeechTranslation', 'TextAnalytics',
'TextTranslation', 'WebLM'
:type kind: str or ~azure.mgmt.cognitiveservices.models.Kind
:param location: Required. Required. Gets or sets the location of the
resource. This will be one of the supported and registered Azure Geo
Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a
resource cannot be changed once it is created, but if an identical geo
region is specified on update the request will succeed.
:type location: str
:param tags: Gets or sets a list of key value pairs that describe the
resource. These tags can be used in viewing and grouping this resource
(across resource groups). A maximum of 15 tags can be provided for a
resource. Each tag must have a key no greater than 128 characters and
value no greater than 256 characters.
:type tags: dict[str, str]
:param properties: Required. Must exist in the request. Must be an empty
object. Must not be null.
:type properties: object
"""
_validation = {
'sku': {'required': True},
'kind': {'required': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(self, *, sku, kind, location: str, properties, tags=None, **kwargs) -> None:
super(CognitiveServicesAccountCreateParameters, self).__init__(**kwargs)
self.sku = sku
self.kind = kind
self.location = location
self.tags = tags
self.properties = properties
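# Hypothetical construction sketch (values are illustrative; `Sku` is the model from this same
# package and `properties` must be an empty object per the validation above):
#
#   params = CognitiveServicesAccountCreateParameters(
#       sku=Sku(name='S0'), kind='TextAnalytics', location='West US',
#       properties={}, tags={'env': 'dev'})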
| python |
#! /usr/bin/python3
#-*- coding: utf-8 -*-
from __future__ import print_function
import datetime
import sys
import re
class SscSite:
def __init__(self, **kwargs):
self.domes = kwargs['domes']
self.site_name = kwargs['site_name']
self.id = kwargs['id']
self.data_start = kwargs['data_start']
self.data_stop = kwargs['data_stop']
self.ref_epoch = kwargs['ref_epoch']
self.soln = int(kwargs['soln'])
self.x = float(kwargs['x'])
self.sx = float(kwargs['sx']) if 'sx' in kwargs else 0e0
self.y = float(kwargs['y'])
        self.sy = float(kwargs['sy']) if 'sy' in kwargs else 0e0
self.z = float(kwargs['z'])
self.sz = float(kwargs['sz']) if 'sz' in kwargs else 0e0
self.vx = float(kwargs['vx'])
self.svx = float(kwargs['svx']) if 'svx' in kwargs else 0e0
self.vy = float(kwargs['vy'])
        self.svy = float(kwargs['svy']) if 'svy' in kwargs else 0e0
self.vz = float(kwargs['vz'])
self.svz = float(kwargs['svz']) if 'svz' in kwargs else 0e0
def extrapolate(self, dt):
# print('\t>> extrapolating from SOLN={:}'.format(self.soln))
days = float((dt-self.ref_epoch).days)
years = days / 365.25e0
return self.x + self.vx*years, self.y + self.vy*years, self.z + self.vz*years
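# Worked example (made-up numbers): with x = 4500000.000 m, vx = 0.010 m/yr and a target epoch
# 365.25 days after ref_epoch (i.e. exactly one year), extrapolate() returns
# x + vx * 1.0 = 4500000.010 m for the X component, and likewise for Y and Z.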
def parse_ssc_date(dstr, default=datetime.datetime.min):
if dstr.strip() == '00:000:00000':
return default
flds = dstr.split(':')
return datetime.datetime.strptime(':'.join(flds[0:2]), '%y:%j') + datetime.timedelta(seconds=int(flds[2]))
def min_of_ssc_records_of_same_site(ssc_recs):
rec = ssc_recs[0]
for i in ssc_recs[1:]:
if i.soln < rec.soln:
rec = i
return rec
def max_of_ssc_records_of_same_site(ssc_recs):
rec = ssc_recs[0]
for i in ssc_recs[1:]:
if i.soln > rec.soln:
rec = i
return rec
def match_site_in_rec_list(site, list):
for s in list:
if s.site_name == site.site_name:
return True
return False
def unique_records(ssc_records, dt):
ssc_unique_records = []
for site in ssc_records:
if not match_site_in_rec_list(site, ssc_unique_records):
# print('>> processing site {:}'.format(site.id))
site_recs = [s for s in ssc_records if s.site_name == site.site_name]
# print('\t>> num of entries = {:}'.format(len(site_recs)))
rec = None
max_date = datetime.datetime.min
min_date = datetime.datetime.max
for s in site_recs:
if s.data_start < min_date: min_date = s.data_start
if s.data_stop > max_date: max_date = s.data_stop
if dt >= s.data_start and dt <= s.data_stop:
ssc_unique_records.append(s)
rec = s
# print('\t>> matched interval! breaking ....')
break
if rec is None:
if dt < min_date:
ssc_unique_records.append(min_of_ssc_records_of_same_site(site_recs))
# print('\t>> interval unmatched, adding min soln ...')
elif dt > max_date:
ssc_unique_records.append(max_of_ssc_records_of_same_site(site_recs))
# print('\t>> interval unmatched, adding max soln ...')
else:
## probably no dt is between intervals ....
print('[WRNNG] No solution interval contains epoch {:} for site {:}_{:}; site skipped, don\'t know what to do!'.format(dt.strftime('%Y-%jT%H:%M'), site.id, site.domes), file=sys.stderr)
return ssc_unique_records
def parse_ssc(ssc_fn, station_list=[], dt=None):
ssc_records = []
with open(ssc_fn, 'r') as fin:
line = fin.readline()
while line and not line.lstrip().startswith('DOMES NB. SITE NAME TECH. ID.'):
line = fin.readline()
## 2 header lines
if not line:
errmsg = '[ERROR] Failed to find header line in SSC file {:}'.format(ssc_fn)
print(errmsg, file=sys.stderr)
raise RuntimeError(errmsg)
if not re.match(r"DOMES\s+NB\.\s+SITE NAME\s+TECH\. ID\.\s+X/Vx\s+Y/Vy\s+Z/Vz\.?\s+Sigmas\s+SOLN\s+DATA_START\s+DATA_END\s+REF\.\s+EPOCH", line.strip()):
errmsg = '[ERROR] Failed matching (column) header line! SSC file {:}'.format(ssc_fn)
print('[ERROR] Failed to resolve line: [{:}]'.format(line.strip()))
raise RuntimeError(errmsg)
line = fin.readline()
## examples of this line:
##[ CLASS ----------------------------m/m/Y-------------------------------------] (epn class A ssc)
##[<> -----------------------m/m/Y-------------------------] (epnd ssc)
assert(re.match(r"\s*[A-Z<>]*\s*-*m/m/Y-*", line.strip()))
line = fin.readline()
assert(line.strip().startswith('----------------------'))
## done with header, parse data
line = fin.readline()
while line:
domes, site_name, tech, id, x, y, z, sx, sy, sz, soln, data_start, data_end, ref_epoch = line.split()
x, y, z, sx, sy, sz = [float(n) for n in [x, y, z, sx, sy, sz]]
data_start, data_end, ref_epoch = [parse_ssc_date(d) for d in [data_start, data_end, ref_epoch]]
if data_end == datetime.datetime.min: data_end = datetime.datetime.max
line = fin.readline()
domes2, vx, vy, vz, svx, svy, svz = line.split()
assert(domes2 == domes)
vx, vy, vz, svx, svy, svz = [float(n) for n in [vx, vy, vz, svx, svy, svz]]
            if (site_name.lower() in [s.lower() for s in station_list] or station_list == []) and (dt is None or dt >= data_start):
ssc_records.append(SscSite(domes=domes, site_name=site_name, id=id, soln=soln, data_start=data_start, data_stop=data_end, ref_epoch=ref_epoch, x=x, y=y, z=z, sx=sx, sy=sy, sz=sz, vx=vx, vy=vy, vz=vz))
line = fin.readline()
return ssc_records if dt is None else unique_records(ssc_records, dt)
def ssc2crd(station_list, dt, *ssc_fn, **kwargs):
sta_list = station_list
sscsite_list = []
for ssc in ssc_fn:
# print('>> parsing ssc file {:}'.format(ssc))
records = parse_ssc(ssc, sta_list, dt)
for sta in records:
index = [s.lower() for s in sta_list].index(sta.site_name.lower())
if index >= 0:
sta_list[index] = 'xxxx'
sscsite_list += records
header = kwargs['header'] if 'header' in kwargs else 'Coordinate Extrapolation from pybern'
datum = kwargs['datum'] if 'datum' in kwargs else 'IGS_14'
flag = kwargs['flag'] if 'flag' in kwargs else 'APR'
    # the output file name is taken from kwargs; 'extrapolated.CRD' is an assumed fallback
    bcrd_out = kwargs['bcrd_out'] if 'bcrd_out' in kwargs else 'extrapolated.CRD'
    with open(bcrd_out, 'w') as bout:
print("{:}".format(header), file=bout)
print("--------------------------------------------------------------------------------", file=bout)
print("LOCAL GEODETIC DATUM: {:} EPOCH: 2010-01-01 00:00:00".format(datum, dt.strftime("%Y-%m-%d %H:%M:%S")), file=bout)
print("", file=bout)
print("NUM STATION NAME X (M) Y (M) Z (M) FLAG", file=bout)
print("", file=bout)
for record in sscsite_list:
x, y, z = record.extrapolate(dt)
            print('{:} {:} {:+15.3f} {:+15.3f} {:+15.3f}'.format(record.id, record.domes, x, y, z), file=bout)
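# Hypothetical call sketch (file names and the bcrd_out keyword are assumptions for illustration):
#
#   import datetime
#   ssc2crd(['DYNG', 'AUT1'], datetime.datetime(2021, 1, 1), 'EPN_A_IGS14.SSC',
#           header='Coordinate extrapolation', datum='IGS_14', bcrd_out='APRIORI.CRD')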
| python |
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from .catom import Member, DefaultValue, Validate
class Typed(Member):
""" A value which allows objects of a given type or types.
Values will be tested using the `PyObject_TypeCheck` C API call.
This call is equivalent to `type(obj) in cls.mro()`. It is less
flexible but faster than Instance. Use Instance when allowing
heterogenous values and Typed when the value type is explicit.
The value of a Typed may be set to None
"""
__slots__ = ()
def __init__(self, kind, args=None, kwargs=None, factory=None):
""" Initialize an Typed.
Parameters
----------
kind : type
The allowed type for the value.
args : tuple, optional
If 'factory' is None, then 'kind' is a callable type and
these arguments will be passed to the constructor to create
the default value.
kwargs : dict, optional
If 'factory' is None, then 'kind' is a callable type and
these keywords will be passed to the constructor to create
the default value.
factory : callable, optional
An optional factory to use for creating the default value.
If this is not provided and 'args' and 'kwargs' is None,
then the default value will be None.
"""
if factory is not None:
self.set_default_value_mode(DefaultValue.CallObject, factory)
elif args is not None or kwargs is not None:
args = args or ()
kwargs = kwargs or {}
factory = lambda: kind(*args, **kwargs)
self.set_default_value_mode(DefaultValue.CallObject, factory)
self.set_validate_mode(Validate.Typed, kind)
class ForwardTyped(Typed):
""" A Typed which delays resolving the type definition.
The first time the value is accessed or modified, the type will
be resolved and the forward typed will behave identically to a
normal typed.
"""
__slots__ = ('resolve', 'args', 'kwargs')
def __init__(self, resolve, args=None, kwargs=None, factory=None):
""" Initialize a ForwardTyped.
resolve : callable
A callable which takes no arguments and returns the type to
use for validating the values.
args : tuple, optional
If 'factory' is None, then 'resolve' will return a callable
type and these arguments will be passed to the constructor
to create the default value.
kwargs : dict, optional
If 'factory' is None, then 'resolve' will return a callable
type and these keywords will be passed to the constructor to
create the default value.
factory : callable, optional
An optional factory to use for creating the default value.
If this is not provided and 'args' and 'kwargs' is None,
then the default value will be None.
"""
self.resolve = resolve
self.args = args
self.kwargs = kwargs
if factory is not None:
self.set_default_value_mode(DefaultValue.CallObject, factory)
elif args is not None or kwargs is not None:
mode = DefaultValue.MemberMethod_Object
self.set_default_value_mode(mode, "default")
self.set_validate_mode(Validate.MemberMethod_ObjectOldNew, "validate")
def default(self, owner):
""" Called to retrieve the default value.
This is called the first time the default value is retrieved
for the member. It resolves the type and updates the internal
default handler to behave like a normal Typed member.
"""
kind = self.resolve()
args = self.args or ()
kwargs = self.kwargs or {}
factory = lambda: kind(*args, **kwargs)
self.set_default_value_mode(DefaultValue.CallObject, factory)
return kind(*args, **kwargs)
def validate(self, owner, old, new):
""" Called to validate the value.
This is called the first time a value is validated for the
member. It resolves the type and updates the internal validate
handler to behave like a normal Typed member.
"""
kind = self.resolve()
self.set_validate_mode(Validate.Typed, kind)
return self.do_validate(owner, old, new)
def clone(self):
""" Create a clone of the ForwardTyped instance.
"""
clone = super(ForwardTyped, self).clone()
clone.resolve = self.resolve
clone.args = self.args
clone.kwargs = self.kwargs
return clone
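# Minimal usage sketch (assumes the Atom base class from this package; the classes below are made up):
#
#   from atom.api import Atom
#
#   class Config(Atom):
#       options = Typed(dict, ())                     # default factory: dict()
#       window = ForwardTyped(lambda: MainWindow)     # type resolved on first access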
| python |
import unittest
from selenium import webdriver
class AdminLoginPageTest(unittest.TestCase):
def setUp(self):
self.admin_username = self.admin_password = 'admin'
self.site_title = 'Global Trade Motors'
self.browser = webdriver.Firefox()
self.browser.get("http://localhost:8000/admin")
def tearDown(self):
self.browser.quit()
def test_site_title(self):
self.assertIn(
self.site_title,
self.browser.title
)
def test_site_header_name(self):
header = self.browser.find_element_by_tag_name('h1')
        self.assertEqual(
self.site_title,
header.text
)
class AdminHomePageTest(unittest.TestCase):
def setUp(self):
self.site_title = 'Global Trade Motors'
self.admin_username = self.admin_password = 'admin'
self.browser = webdriver.Firefox()
self.browser.get("http://localhost:8000/admin")
self.login()
def tearDown(self):
self.browser.quit()
def login(self):
self.browser.find_element_by_id(
'id_username').send_keys(self.admin_username)
password = self.browser.find_element_by_id(
'id_password')
password.send_keys(self.admin_password)
password.send_keys('\n')
def test_site_branding_header(self):
site_name = self.browser.find_element_by_id('site-name')
        self.assertEqual(
self.site_title,
site_name.text
)
if __name__ == '__main__':
unittest.main()
| python |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Product(models.Model):
productname=models.CharField(max_length= 255)
productdescription=models.TextField(null=True, blank=True)
productusage=models.TextField(null=True, blank=True)
productquantity=models.IntegerField()
user=models.ForeignKey(User, on_delete = models.DO_NOTHING)
productcost=models.IntegerField()
def __str__(self):
return self.productname
class Meta:
db_table = 'Product'
class Monk(models.Model):
monkname=models.CharField(max_length= 255)
monkage=models.IntegerField()
user=models.ManyToManyField(User)
def __str__(self):
return self.monkname
class Meta:
db_table = 'Monk'
class Member(models.Model):
membername=models.CharField(max_length= 255)
memberage=models.IntegerField()
user=models.ManyToManyField(User)
memberaddress=models.CharField(max_length=255)
membercity=models.CharField(max_length=50)
memberstate=models.CharField(max_length=2)
    memberzip=models.IntegerField()
    memberphone=models.IntegerField()
def __str__(self):
        return self.membername
class Meta:
db_table = 'Member'
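# Illustrative ORM usage (hypothetical values; `some_user` must be a saved User instance):
#
#   product = Product.objects.create(
#       productname='Incense sticks', productquantity=10, productcost=150, user=some_user)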
| python |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import sys
import unittest
import datetime as dt
import timeit
import SimpleITK as sitk
import numpy as np
sizeX = 4
sizeY = 5
sizeZ = 3
newSimpleITKPixelValueInt32 = -3000
newNumPyElementValueInt32 = 200
class TestNumpySimpleITKMemoryviewInterface(unittest.TestCase):
""" This tests numpy array <-> SimpleITK Image conversion. """
def setUp(self):
pass
def _helper_check_sitk_to_numpy_type(self, sitkType, numpyType):
if sitkType == sitk.sitkUnknown:
return
image = sitk.Image((9, 10), sitkType, 1)
a = sitk.GetArrayViewFromImage(image)
self.assertEqual(numpyType, a.dtype)
self.assertEqual((10, 9), a.shape)
def test_type_to_numpy(self):
"try all sitk pixel types to convert to NumPy array view"
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt8, np.uint8)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt16, np.uint16)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt32, np.uint32)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt64, np.uint64)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt8, np.int8)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt16, np.int16)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt32, np.int32)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt64, np.int64)
self._helper_check_sitk_to_numpy_type(sitk.sitkFloat32, np.float32)
self._helper_check_sitk_to_numpy_type(sitk.sitkFloat64, np.float64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt8, np.uint8)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt8, np.int8)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt16, np.uint16)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt16, np.int16)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt32, np.uint32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt32, np.int32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt64, np.uint64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt64, np.int64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat32, np.float32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat64, np.float64)
def test_to_numpy_and_back(self):
"""Test converting an image to NumPy array view and back"""
img = sitk.GaussianSource( sitk.sitkFloat32, [100,100], sigma=[10]*3, mean=[50,50] )
h = sitk.Hash( img )
img2 = sitk.GetImageFromArray( sitk.GetArrayViewFromImage(img))
self.assertEqual( h, sitk.Hash( img2 ))
def test_vector_image_to_numpy(self):
"""Test converting back and forth between NumPy array view and SimpleITK
images where the SimpleITK image has multiple components and
stored as a VectorImage."""
# Check 2D
img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4])
h = sitk.Hash( img )
nda = sitk.GetArrayViewFromImage(img)
self.assertEqual(nda.shape, (4,3,2))
self.assertEqual(nda[0,0].tolist(), [0,0])
self.assertEqual(nda[2,1].tolist(), [1,2])
self.assertEqual(nda[0,:,0].tolist(), [0,1,2])
img2 = sitk.GetImageFromArray(nda, isVector=True)
self.assertEqual(h, sitk.Hash(img2))
# check 3D
img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4,5])
h = sitk.Hash(img)
nda = sitk.GetArrayViewFromImage(img)
self.assertEqual(nda.shape, (5,4,3,3))
self.assertEqual(nda[0,0,0].tolist(), [0,0,0])
self.assertEqual(nda[0,0,:,0].tolist(), [0,1,2])
self.assertEqual(nda[0,:,1,1].tolist(), [0,1,2,3])
img2 = sitk.GetImageFromArray(nda)
self.assertEqual(img2.GetSize(), img.GetSize())
self.assertEqual(img2.GetNumberOfComponentsPerPixel(), img.GetNumberOfComponentsPerPixel())
self.assertEqual(h, sitk.Hash(img2))
def test_arrayview_writable(self):
"""Test correct behavior of writablity to the returned array view."""
img = sitk.Image((9, 10), sitk.sitkFloat32, 1)
a = sitk.GetArrayViewFromImage(img)
with self.assertRaises(ValueError):
a.fill(0)
def test_processing_time(self):
"""Check the processing time the conversions from SimpleITK Image
to numpy array (GetArrayViewFromImage) and
numpy memoryview (GetArrayViewFromImage)."""
# Performance test for SimpleITK Image -> NumPy array
img = sitk.GaussianSource(sitk.sitkFloat32, [3000,3000], sigma=[10]*3, mean=[50,50])
print("\nGet NumPy array from 3000x3000 SimpleITK Image")
nparray_time_elapsed = min(timeit.repeat(lambda: sitk.GetArrayFromImage(img), repeat=5, number=1))
print ("Processing time of GetArrayFromImage (Copy operation) :: {0} (us)".format(nparray_time_elapsed*1e6))
npview_time_elapsed = min(timeit.repeat(lambda: sitk.GetArrayViewFromImage(img), repeat=5, number=1))
print ("Processing time of GetArrayViewFromImage (Array view) :: {0} (us)".format(npview_time_elapsed*1e6))
self.assertTrue( nparray_time_elapsed > npview_time_elapsed)
        # Performance test for NumPy array -> SimpleITK Image (completion sketch; numbers illustrative)
        Big_nparray = np.zeros((3000,3000), dtype=np.int64)
        big_array_time_elapsed = min(timeit.repeat(lambda: sitk.GetImageFromArray(Big_nparray), repeat=5, number=1))
        print("Processing time of GetImageFromArray from a 3000x3000 array :: {0} (us)".format(big_array_time_elapsed*1e6))
if __name__ == '__main__':
unittest.main()
| python |
#!/usr/bin/env python2.7
#coding:utf-8
#import bpy
import os
import math
import random
from PIL import Image
import time
import codecs
import hjson
from bslideshow.slideshow import Slideshow
from bslideshow.tools import BlenderTools
ADJUST_Y = -0.1
class Director(BlenderTools):
def __init__ (self):
self.slideshow = None
self.frame = 0.0
self.sortPhotos = False
BlenderTools.__init__(self)
def buildSlideshow (self, i, folderImages):
#folderImages = "/media/jmramoss/ALMACEN/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2"
slideshow = Slideshow('background' + str(i))
#slideshow.selectPhotos("/media/jmramoss/ALMACEN/slideshow/grid_frames/")
slideshow.selectPhotos(folderImages)
print("PRE")
print(slideshow.photos)
if False or (i == 0 and self.sortPhotos):
#sorted(slideshow.photos, key=path)
slideshow.photos.sort(key=lambda x: x.path)
print("POST")
print(slideshow.photos)
#quit()
if True and (i != 0 or (i == 0 and not self.sortPhotos)):
slideshow.shufflePhotos()
slideshow.draw()
#slideshow.alignColumn(separator=0.05)
slideshow.alignGrid(separator=0.2)
slideshow.shuffleTranslate(maxX = 0.05, maxY = 0.05)
slideshow.shuffleRotateZ()
return slideshow
def buildScene (self, folderImages):
import bpy
cam = bpy.data.objects['Camera']
print(str(type(cam)))
from pprint import pprint
pprint(cam)
print(str(cam.items()))
cam.data.clip_start = 0.001
#for i in range(1, 10):
# add_image("/media/jmramoss/ALMACEN/slideshow/ramsau-3564068_960_720.jpg", i)
slideshow = self.buildSlideshow(0, folderImages)
slideshow.parentObj.location[0] += 0.0
slideshow.parentObj.location[1] += 0.0
slideshow.parentObj.location[2] += 0.0
self.slideshow = slideshow
posZ = -0.5
#separator = 1.02
separator = -1.5
separator = 1.2
incZ = -1.1 * 5
for i in range(0, 0):
randomX = 0
randomY = 0
if False:
slideshow = self.buildSlideshow(1, folderImages)
slideshow.parentObj.location[0] += (random.uniform(-0.3, 0.3) * 1)
slideshow.parentObj.location[1] += (random.uniform(-0.3, 0.3) * 1)
slideshow.parentObj.location[2] += (2.0 * posZ) + incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(2, folderImages)
slideshow.parentObj.location[0] += -self.slideshow.getDimensions()[0] - separator + randomX
slideshow.parentObj.location[1] += 0 + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(3, folderImages)
slideshow.parentObj.location[0] += self.slideshow.getDimensions()[0] + separator + randomX
slideshow.parentObj.location[1] += 0 + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(4, folderImages)
slideshow.parentObj.location[0] += 0 + randomX
slideshow.parentObj.location[1] += self.slideshow.getDimensions()[1] + separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(5, folderImages)
slideshow.parentObj.location[0] += 0 + randomX
slideshow.parentObj.location[1] += -self.slideshow.getDimensions()[1] - separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(6, folderImages)
slideshow.parentObj.location[0] += -self.slideshow.getDimensions()[0] - separator + randomX
slideshow.parentObj.location[1] += -self.slideshow.getDimensions()[1] - separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(7, folderImages)
slideshow.parentObj.location[0] += self.slideshow.getDimensions()[0] + separator + randomX
slideshow.parentObj.location[1] += self.slideshow.getDimensions()[1] + separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(8, folderImages)
slideshow.parentObj.location[0] += -self.slideshow.getDimensions()[0] - separator + randomX
slideshow.parentObj.location[1] += self.slideshow.getDimensions()[1] + separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(9, folderImages)
slideshow.parentObj.location[0] += self.slideshow.getDimensions()[0] + separator + randomX
slideshow.parentObj.location[1] += -self.slideshow.getDimensions()[1] - separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
'''
for i in range(2):
slideshow = Slideshow('background' + str(i))
#slideshow.selectPhotos("/media/jmramoss/ALMACEN/slideshow/grid_frames/")
slideshow.selectPhotos("/media/jmramoss/ALMACEN/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2")
slideshow.shufflePhotos()
slideshow.draw()
#slideshow.alignColumn()
slideshow.alignGrid()
slideshow.shuffleTranslate()
slideshow.shuffleRotateZ()
slideshow.parentObj.location[0] += (random.uniform(-0.3, 0.3) * i)
slideshow.parentObj.location[1] += (random.uniform(-0.3, 0.3) * i)
slideshow.parentObj.location[2] += (-0.1 * i)
if i == 0:
self.slideshow = slideshow
'''
'''
#obj_camera = bpy.context.scene.camera
# Set camera translation
#scene.camera.location.x = 0.0
#scene.camera.location.y = 0.0
#scene.camera.location.z = 80.0
#fov = 50.0
#pi = 3.14159265
# Set camera fov in degrees
#scene.camera.data.angle = fov*(pi/180.0)
'''
def camLookAt (self):
import bpy
if(len(bpy.data.cameras) == 1):
obj = bpy.data.objects['Camera'] # bpy.types.Camera
obj.location.x = 10.0
obj.location.y = -5.0
obj.location.z = 5.0
pass
'''
# Set camera rotation in euler angles
#rx = 0.0
#ry = 0.0
#rz = 0.0
#scene.camera.rotation_mode = 'XYZ'
#scene.camera.rotation_euler[0] = rx*(pi/180.0)
#scene.camera.rotation_euler[1] = ry*(pi/180.0)
#scene.camera.rotation_euler[2] = rz*(pi/180.0)
'''
def camRotate (self, rx, ry, rz):
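# Sets the camera rotation from Euler angles given in degrees.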
import bpy
if(len(bpy.data.cameras) == 1):
obj = bpy.data.objects['Camera'] # bpy.types.Camera
obj.rotation_mode = 'XYZ'
obj.rotation_euler[0] = rx*(math.pi/180.0)
obj.rotation_euler[1] = ry*(math.pi/180.0)
obj.rotation_euler[2] = rz*(math.pi/180.0)
pass
def showPicture (self, picName):
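# Moves the camera 4 units above the named photo, looking straight down at it.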
import bpy
pic = bpy.data.objects[picName]
obj = bpy.data.objects['Camera'] # bpy.types.Camera
obj.rotation_mode = 'XYZ'
obj.location.x = pic.location.x
obj.location.y = pic.location.y
obj.location.z = pic.location.z + 4.0
rx = 0
ry = 0
rz = 0
obj.rotation_euler[0] = rx*(math.pi/180.0)
obj.rotation_euler[1] = ry*(math.pi/180.0)
obj.rotation_euler[2] = rz*(math.pi/180.0)
''' Animation
#if(len(bpy.data.cameras) == 1):
# obj = bpy.data.objects['Camera'] # bpy.types.Camera
# obj.location.x = 0.0
# obj.location.y = -10.0
# obj.location.z = 10.0
# obj.keyframe_insert(data_path="location", frame=10.0)
# obj.location.x = 10.0
# obj.location.y = 0.0
# obj.location.z = 5.0
# obj.keyframe_insert(data_path="location", frame=20.0)
'''
def showSlideshow2 (self, numPhotos, maxFrames):
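# Visits every photo in index order, keyframing the camera over each one at evenly spaced frames.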
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
for i in range(numPhotos):
idx = i + 1
picName = 'pic' + str(idx)
self.showPicture(picName)
frame = i * incFrames
cam = bpy.data.objects['Camera'] # bpy.types.Camera
if i == 0:
cam.keyframe_insert(data_path="location", frame=frame+(2*24))
else:
cam.keyframe_insert(data_path="location", frame=frame-(2*24))
cam.keyframe_insert(data_path="location", frame=frame)
def showSlideshow3 (self, numPhotos, maxFrames):
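# Like showSlideshow2, but also varies the camera height and tilt from photo to photo.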
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
for i in range(numPhotos):
idx = i + 1
picName = 'pic' + str(idx)
self.showPicture(picName)
frame = i * incFrames
incZ = random.uniform(-3.0, 3.0)
cam.location.z = startCamLocationZ + incZ
rx = 3.0 if i % 2 == 0 else 0.0
ry = 0.0 if i % 2 == 0 else 6.0
rz = 0.0 if i % 2 == 0 else 15.0
cam.rotation_euler[0] = rx*(math.pi/180.0)
cam.rotation_euler[1] = ry*(math.pi/180.0)
cam.rotation_euler[2] = rz*(math.pi/180.0)
if i == 0:
cam.keyframe_insert(data_path="location", frame=frame+(2*24))
cam.keyframe_insert(data_path="rotation_euler", frame=frame+(2*24))
else:
cam.keyframe_insert(data_path="location", frame=frame-(2*24))
cam.keyframe_insert(data_path="rotation_euler", frame=frame-(2*24))
cam.keyframe_insert(data_path="location", frame=frame)
cam.keyframe_insert(data_path="rotation_euler", frame=frame)
def showSlideshowDuration (self, duration=120):
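# One segment of the slideshow: hover above a random photo, then descend and tilt over 'duration' frames.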
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
idx = random.randint(1, numPhotos)
picName = 'pic' + str(idx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
cam.location.z -= random.uniform(1.0, 2.5)
cam.rotation_euler[0] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration + 12.0
def showSlideshow (self, numPhotos, maxFrames):
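# Same move as showSlideshowDuration, with the segment length derived from maxFrames / numPhotos.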
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
idx = random.randint(1, numPhotos)
picName = 'pic' + str(idx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
cam.location.z -= random.uniform(1.0, 2.5)
cam.rotation_euler[0] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
def showRowColumnDuration (self, duration=120):
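# Pans the camera from one randomly chosen photo to another over 'duration' frames.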
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
pic = bpy.data.objects[picName]
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration + 12.0
def showAllPhotos (self, duration=120, zoom=True, onlyEnd=False):
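# Frames the centre of the grid and zooms in (zoom=True) or out (zoom=False) over the whole
# slideshow; when onlyEnd is True only the final framing is keyframed.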
import bpy
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
if zoom:
zoomMinZStart = zoomMinZ2
zoomMaxZStart = zoomMaxZ2
zoomMinZEnd = zoomMinZ1
zoomMaxZEnd = zoomMaxZ1
else:
zoomMinZStart = zoomMinZ1
zoomMaxZStart = zoomMaxZ1
zoomMinZEnd = zoomMinZ2
zoomMaxZEnd = zoomMaxZ2
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
if not onlyEnd:
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def getAllPics (self):
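# Returns the Blender objects of all photos in the main slideshow.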
result = list()
for p in self.slideshow.photos:
result.append(p.obj)
return result
def showAllPhotosPicZoomIn (self, picName, duration=120):
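# Opens on the whole grid, then re-frames the camera onto the named photo and holds on it.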
import bpy
pic = bpy.data.objects[picName]
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZStart = zoomMinZ2
zoomMaxZStart = zoomMaxZ2
zoomMinZEnd = zoomMinZ1
zoomMaxZEnd = zoomMaxZ1
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
allPics = self.getAllPics()
timeFinalPhoto = int(duration / 4)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects(allPics)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
#cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = centerPosition[2] + random.uniform(-0.001, 0.001)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showAllPhotosPicZoomOut (self, picName, duration=120):
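# Opens framed on the named photo, holds briefly, then pulls back to reveal the whole grid.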
import bpy
pic = bpy.data.objects[picName]
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZStart = zoomMinZ1
zoomMaxZStart = zoomMaxZ1
zoomMinZEnd = zoomMinZ2
zoomMaxZEnd = zoomMaxZ2
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
allPics = self.getAllPics()
timeFinalPhoto = int(duration / 4)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = centerPosition[0] + random.uniform(-0.001, 0.001)
cam.location.y = centerPosition[1] + random.uniform(-0.001, 0.001)
#cam.location.z = centerPosition[2] + random.uniform(-0.001, 0.001)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame + timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeFinalPhoto)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects(allPics)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showAllPhotosPic (self, picName, duration=120, zoom=True):
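# Combined move: grid-to-photo when zoom=True, photo-to-grid when zoom=False.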
import bpy
pic = bpy.data.objects[picName]
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
if zoom:
zoomMinZStart = zoomMinZ2
zoomMaxZStart = zoomMaxZ2
zoomMinZEnd = zoomMinZ1
zoomMaxZEnd = zoomMaxZ1
else:
zoomMinZStart = zoomMinZ1
zoomMaxZStart = zoomMaxZ1
zoomMinZEnd = zoomMinZ2
zoomMaxZEnd = zoomMaxZ2
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
allPics = self.getAllPics()
timeFinalPhoto = 24*3
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
if zoom:
self.showObjects(allPics)
else:
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
if not zoom:
cam.location.x = centerPosition[0] + random.uniform(-0.001, 0.001)
cam.location.y = centerPosition[1] + random.uniform(-0.001, 0.001)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeFinalPhoto)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
if zoom:
self.showObjects([pic])
else:
self.showObjects(allPics)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.location.x = centerPosition[0] + random.uniform(-0.001, 0.001)
cam.location.y = centerPosition[1] + random.uniform(-0.001, 0.001)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showLinePhotosGroup (self, duration=120, picNameStart=None, picNameEnd=None, zoom=None, groupStart=None, groupEnd=None):
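# Travels from a start photo to an end photo; groupStart/groupEnd list extra photo names
# used to frame each end of the move.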
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
picStart = None
if picNameStart is None:
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
picStart = bpy.data.objects[picName]
else:
picStart = bpy.data.objects[picNameStart]
zoomMinZ = 3.5
zoomMaxZ = 5.0
if zoom == 0:
zoomMinZ = 1.8
zoomMaxZ = 2.5
elif zoom == 1:
zoomMinZ = 2.5
zoomMaxZ = 3.5
elif zoom == 2:
zoomMinZ = 5.0
zoomMaxZ = 6.0
elif zoom == 3:
zoomMinZ = 7.0
zoomMaxZ = 8.0
timeStartEnd = int(duration / 6)
cam.rotation_mode = 'XYZ'
cam.location.x = picStart.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picStart.location.y + random.uniform(-0.01, 0.01) + ADJUST_Y
cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
if groupStart is not None and len(groupStart) > 0:
pics = list()
for groupName in groupStart:
picGroup = bpy.data.objects[groupName]
pics.append(picGroup)
self.showObjects(pics)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + timeStartEnd)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeStartEnd)
picEnd = None
if picNameEnd is None:
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
picEnd = bpy.data.objects[picName]
else:
picEnd = bpy.data.objects[picNameEnd]
cam.location.x = picEnd.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picEnd.location.y + random.uniform(-0.01, 0.01) + ADJUST_Y
cam.location.z = picEnd.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
if groupEnd is not None and len(groupEnd) > 0:
pics = list()
for groupName in groupEnd:
picGroup = bpy.data.objects[groupName]
pics.append(picGroup)
self.showObjects(pics)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - timeStartEnd - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - timeStartEnd - 12)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showLinePhotos (self, duration=120, picNameStart=None, picNameEnd=None, zoom=None):
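# Travels from a start photo to an end photo, framing a single photo at each end.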
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
picStart = None
if picNameStart is None:
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
picStart = bpy.data.objects[picName]
else:
picStart = bpy.data.objects[picNameStart]
zoomMinZ = 3.5
zoomMaxZ = 5.0
if zoom == 0:
zoomMinZ = 1.8
zoomMaxZ = 2.5
elif zoom == 1:
zoomMinZ = 2.5
zoomMaxZ = 3.5
elif zoom == 2:
zoomMinZ = 5.0
zoomMaxZ = 6.0
elif zoom == 3:
zoomMinZ = 7.0
zoomMaxZ = 8.0
timeStartEnd = int(duration / 8)
cam.rotation_mode = 'XYZ'
cam.location.x = picStart.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picStart.location.y + random.uniform(-0.01, 0.01)
cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
self.showObjects([picStart])
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + timeStartEnd)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeStartEnd)
picEnd = None
if picNameEnd is None:
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
picEnd = bpy.data.objects[picName]
else:
picEnd = bpy.data.objects[picNameEnd]
cam.location.x = picEnd.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picEnd.location.y + random.uniform(-0.01, 0.01)
cam.location.z = picEnd.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([picEnd])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - timeStartEnd - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - timeStartEnd - 12)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showRowColumn (self, numPhotos, maxFrames):
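# Pans between two randomly chosen photos; segment length derived from maxFrames / numPhotos.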
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
pic = bpy.data.objects[picName]
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
def showZoomInOutDuration (self, duration=120):
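# Picks a random photo and moves the camera towards or away from it over 'duration' frames.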
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + startZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endZ = startZ - 3.0 if startZ > 3.0 else startZ + 2.0
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + endZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration + 12.0
def showZoomInOut (self, numPhotos, maxFrames):
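# Same zoom move as showZoomInOutDuration, with the segment length derived from maxFrames / numPhotos.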
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + startZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endZ = startZ - 3.0 if startZ > 3.0 else startZ + 2.0
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + endZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
# Moves toward and away from a photo
def showDeleite (self, numPhotos, maxFrames):
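# Hovers over a random photo: rises away from it, then settles partway back down, lingering on the shot.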
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
mitad1Frames = incFrames/2
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ = 2.0
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(incFrames/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(incFrames/2))
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
def showDeleiteDuration (self, duration=120, picName=None):
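# Same lingering move with an explicit duration; picks a random photo when picName is None.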
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
if picName is None:
numPhotos = len(self.slideshow.photos)
print("numPhotos = " + str(numPhotos))
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ = 2.0
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showObjects (self, selection):
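# Deselects everything, selects the given objects and fits the camera to them with camera_to_view_selected.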
import bpy
scene = bpy.context.scene
for obj in scene.objects:
obj.select = False
for obj in selection:
obj.select = True
bpy.ops.view3d.camera_to_view_selected()
'''
def getDistanceMaxXY (self, pic1, pic2):
result = None
result = (maxX, maxY)
return result
'''
'''
from bpy import context
# Select objects that will be rendered
for obj in scene.objects:
obj.select = False
for obj in context.visible_objects:
if not (obj.hide or obj.hide_render):
obj.select = True
bpy.ops.view3d.camera_to_view_selected()
'''
'''
camera_fit_coords(scene, coordinates)
Compute the coordinate (and scale for ortho cameras) given object should be to ‘see’ all given coordinates
Parameters:
scene (Scene) – Scene to get render size information from, if available
coordinates (float array of 1 items in [-inf, inf], (never None)) – Coordinates to fit in
Return (co_return, scale_return):
co_return, The location to aim to be able to see all given points, float array of 3 items in [-inf, inf]
scale_return, The ortho scale to aim to be able to see all given points (if relevant), float in [-inf, inf]
'''
# Moves toward and away from a photo
def showDeleiteTwoPhotos (self, duration=120, picName1=None, picName2=None):
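# Frames the midpoint between two photos and slowly closes in, ending with both fitted in view.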
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
if picName1 is None:
numPhotos = len(self.slideshow.photos)
startIdx = random.randint(1, numPhotos)
picName1 = 'pic' + str(startIdx)
pic1 = bpy.data.objects[picName1]
if picName2 is None:
numPhotos = len(self.slideshow.photos)
startIdx = random.randint(1, numPhotos)
picName2 = 'pic' + str(startIdx)
pic2 = bpy.data.objects[picName2]
pos = [0, 0, 0]
pos[0] = (pic1.location.x + pic2.location.x) / 2.0
pos[1] = (pic1.location.y + pic2.location.y) / 2.0
pos[2] = (pic1.location.z + pic2.location.z) / 2.0
#initZ1 = random.uniform(5.0, 5.5)
#initZ2 = random.uniform(4.5, 5.0)
initZ1 = random.uniform(3.01, 3.5)
initZ2 = random.uniform(2.5, 3.0)
#factorRandom1 = random.uniform(0.26, 0.31)
factorRandom1 = random.uniform(0.01, 0.05)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pos[0] + random.uniform(- factorRandom1, factorRandom1)
cam.location.y = pos[1] + random.uniform(- factorRandom1, factorRandom1) + ADJUST_Y
cam.location.z = pos[2] + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
cam.location.x = pos[0] + random.uniform(-factorRandom2, factorRandom2)
cam.location.y = pos[1] + random.uniform(-factorRandom2, factorRandom2) + ADJUST_Y
cam.location.z = pos[2] + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
'''
scene = bpy.context.scene
c1Pic1 = self.getCorner1(pic1)
c2Pic1 = self.getCorner2(pic1)
c3Pic1 = self.getCorner3(pic1)
c4Pic1 = self.getCorner4(pic1)
c1Pic2 = self.getCorner1(pic2)
c2Pic2 = self.getCorner2(pic2)
c3Pic2 = self.getCorner3(pic2)
c4Pic2 = self.getCorner4(pic2)
co_return, scale_return = cam.camera_fit_coords(scene, (c1Pic1[0], c1Pic1[1], c1Pic1[2], c2Pic1[0], c2Pic1[1], c2Pic1[2], c3Pic1[0], c3Pic1[1], c3Pic1[2], c4Pic1[0], c4Pic1[1], c4Pic1[2], c1Pic2[0], c1Pic2[1], c1Pic2[2], c2Pic2[0], c2Pic2[1], c2Pic2[2], c3Pic2[0], c3Pic2[1], c3Pic2[2], c4Pic2[0], c4Pic2[1], c4Pic2[2]))
cam.location.x = co_return[0]
cam.location.y = co_return[1]
#cam.location.z = co_return[2]
cam.scale[0] = scale_return
cam.scale[1] = scale_return
cam.scale[2] = scale_return
'''
self.showObjects([pic1, pic2])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
#cam.keyframe_insert(data_path="scale", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def getCorner1 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x - (pic.dimensions[0]/2.0)
result[1] = pic.location.y + (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
def getCorner2 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x + (pic.dimensions[0]/2.0)
result[1] = pic.location.y + (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
def getCorner3 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x - (pic.dimensions[0]/2.0)
result[1] = pic.location.y - (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
def getCorner4 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x + (pic.dimensions[0]/2.0)
result[1] = pic.location.y - (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
# Moves toward and away from a photo
def showDeleiteOnePhoto (self, duration=120, picName=None):
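# Slowly closes in on a single photo, ending with it fitted in view.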
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
if picName is None:
numPhotos = len(self.slideshow.photos)
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
#initZ1 = random.uniform(2.51, 3.0)
#initZ2 = random.uniform(2.0, 2.5)
initZ1 = random.uniform(2.01, 2.5)
initZ2 = random.uniform(1.8, 2.0)
#factorRandom1 = random.uniform(0.06, 0.10)
factorRandom1 = random.uniform(0.01, 0.05)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(- factorRandom1, factorRandom1)
cam.location.y = pic.location.y + random.uniform(- factorRandom1, factorRandom1) + ADJUST_Y
cam.location.z = pic.location.z + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
cam.location.x = pic.location.x + random.uniform(-factorRandom2, factorRandom2)
cam.location.y = pic.location.y + random.uniform(-factorRandom2, factorRandom2) + ADJUST_Y
cam.location.z = pic.location.z + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
'''
scene = bpy.context.scene
c1Pic1 = self.getCorner1(pic)
c2Pic1 = self.getCorner2(pic)
c3Pic1 = self.getCorner3(pic)
c4Pic1 = self.getCorner4(pic)
co_return, scale_return = cam.camera_fit_coords(scene, (c1Pic1[0], c1Pic1[1], c1Pic1[2], c2Pic1[0], c2Pic1[1], c2Pic1[2], c3Pic1[0], c3Pic1[1], c3Pic1[2], c4Pic1[0], c4Pic1[1], c4Pic1[2]))
cam.location.x = co_return[0]
cam.location.y = co_return[1]
#cam.location.z = co_return[2]
cam.scale[0] = scale_return
cam.scale[1] = scale_return
cam.scale[2] = scale_return
'''
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
#cam.keyframe_insert(data_path="scale", frame=self.frame + duration - 12)
self.frame = self.frame + duration
# Moves toward and away from a photo
def showDeleiteOnePhotoProject (self, duration=120):
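# Drifts around one photo at close range: a larger random offset at the start, nearly centred by the end.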
import bpy
numPhotos = len(self.slideshow.photos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ1 = random.uniform(1.5, 1.8)
initZ2 = random.uniform(1.5, 1.8)
initZ3 = random.uniform(1.5, 1.8)
factorRandom1 = random.uniform(0.50, 1.00)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.z = pic.location.z + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.z = pic.location.z + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + (duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + (duration/2))
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.z = pic.location.z + initZ3
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + (duration))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + (duration))
self.frame = self.frame + duration
# Moves toward and away from a photo
def showDeleiteOnePhotoSection (self, duration=120):
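# Approaches one photo from further away and ends closer, nearly centred on it.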
import bpy
numPhotos = len(self.slideshow.photos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ1 = random.uniform(4.5, 6.0)
initZ2 = random.uniform(3.0, 4.0)
factorRandom1 = random.uniform(0.50, 1.00)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.z = pic.location.z + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.z = pic.location.z + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration
def doAnimSlideshow (self, folderImages, time=None, movieOutput=None):
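# Main entry point: builds the scene, plans a sequence of camera moves (zoom in, corner and line
# travels, photo pairs and singles), rescales their durations to fit 'time' seconds at 24 fps,
# plays them in random order and renders the movie.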
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
#filepath imgBackground
#bpy.context.scene.node_tree.nodes['imgBackground'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background.jpg'
bpy.data.images['background'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background2.jpg'
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
if time is None:
if sizeBorder > 4:
time = int(float(numPhotos) * 2.5)
else:
time = numPhotos * 3
rest = list()
for i in range(0, numPhotos):
pic = 'pic' + str((i+1))
rest.append(pic)
dataAnim = list()
#dataAnim.append({'type': 'zoom_in', 'time': 240})
#dataAnim.append({'type': 'zoom_out', 'time': 120})
#dataAnim.append({'type': 'corners', 'start': picCorner, 'end': picExtremo, 'time': 360})
#dataAnim.append({'type': 'line', 'start': picStart, 'end': picExtremo, 'time': 360})
#dataAnim.append({'type': 'one', 'pic': picOne, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
durationZoomIn = 240
durationZoomOut = 120
durationCorner = 760
durationLine = 560
durationTwoPhotos = 120
durationOnePhoto = 120
if sizeBorder == 6:
durationZoomIn = 240
durationZoomOut = 120
durationCorner = 760
durationLine = 560
elif sizeBorder == 5:
durationZoomIn = 192
durationZoomOut = 96
durationCorner = 700
durationLine = 500
elif sizeBorder == 4:
durationZoomIn = 144
durationZoomOut = 72
durationCorner = 640
durationLine = 440
elif sizeBorder == 3:
durationZoomIn = 72
durationZoomOut = 48
durationCorner = 580
durationLine = 380
elif sizeBorder == 2:
durationZoomIn = 72
durationZoomOut = 48
durationCorner = 520
durationLine = 320
picZoomIn = int(numPhotos / 2)
picZoomOut = picZoomIn + 1
if sizeBorder == 6:
picZoomIn = 15
picZoomOut = 22
elif sizeBorder == 5:
picZoomIn = 12
picZoomOut = 14
elif sizeBorder == 4:
picZoomIn = 6
picZoomOut = 11
elif sizeBorder == 3:
picZoomIn = 4
picZoomOut = 6
elif sizeBorder == 2:
picZoomIn = 1
picZoomOut = 4
picZoomInName = 'pic' + str(picZoomIn)
picZoomOutName = 'pic' + str(picZoomOut)
if picZoomInName in rest:
rest.remove(picZoomInName)
if picZoomOutName in rest:
rest.remove(picZoomOutName)
if sizeBorder > 3:
#corner
picCorners = self.getCornerPictures()
picCorner = random.choice(picCorners)
picExtremo = self.getPicExtremoCorner(picCorner)
if picCorner in picCorners:
picCorners.remove(picCorner)
if picExtremo in picCorners:
picCorners.remove(picExtremo)
if picCorner in rest:
rest.remove(picCorner)
if picExtremo in rest:
rest.remove(picExtremo)
picMiddle = self.getPicMiddle(picCorner, picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
dataAnim.append({'type': 'corners', 'start': picCorner, 'end': picExtremo, 'time': durationCorner, 'zoom': 1})
if sizeBorder >= 5:
vecinosCorner = self.getPicVecinosCorner(picCorner, picExtremo)
for itemVecino in vecinosCorner:
if itemVecino in rest:
rest.remove(itemVecino)
#self.showLinePhotos(duration=360, picNameStart=picCorner, picNameEnd=picExtremo, zoom=2)
if sizeBorder > 3:
#line
allBorders = self.getExternPictures()
picBorders = list()
for pic in allBorders:
if pic not in picCorners:
picBorders.append(pic)
#picBorders = [x for x in self.getExternPictures() if x not in picCorners]
picStart = random.choice(picBorders)
picExtremo = self.getPicExtremo(picStart)
if picStart in picBorders:
picBorders.remove(picStart)
if picExtremo in picBorders:
picBorders.remove(picExtremo)
if picStart in rest:
rest.remove(picStart)
if picExtremo in rest:
rest.remove(picExtremo)
picMiddle = self.getPicMiddle(picStart, picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
dataAnim.append({'type': 'line', 'start': picStart, 'end': picExtremo, 'time': durationLine, 'zoom': 0})
#self.showLinePhotos(duration=360, picNameStart=picStart, picNameEnd=picExtremo, zoom=1)
numPendientes = len(rest)
numParejas = int((1.0/3.0)*numPendientes)
numIndividuales = numPendientes - (2*numParejas)
while numParejas > 0:
item = random.choice(rest)
masCercana = self.getPhotoMasCercana(item, rest)
if item is not None and masCercana is not None:
if item in rest:
rest.remove(item)
if masCercana in rest:
rest.remove(masCercana)
dataAnim.append({'type': 'two', 'pic1': item, 'pic2': masCercana, 'time': durationTwoPhotos})
numParejas -= 1
numIndividuales += (2*numParejas)
while numIndividuales > 0:
item = random.choice(rest)
if item is not None:
if item in rest:
rest.remove(item)
dataAnim.append({'type': 'one', 'pic': item, 'time': durationOnePhoto})
numIndividuales -= 1
#self.showDeleiteOnePhoto(duration=120, picName='pic1')
#dataAnim.append({'type': 'one', 'pic': 'pic1', 'time': 120})
#self.showDeleiteTwoPhotos(duration=120, picName1='pic1', picName2='pic2')
#dataAnim.append({'type': 'two', 'pic1': 'pic1', 'pic2': 'pic12', 'time': 120})
if time is not None:
totalTimeFrames = 0
totalTimeFrames += durationZoomIn
for itemAnim in dataAnim:
totalTimeFrames += itemAnim['time']
totalTimeFrames += durationZoomOut
maxTimeFrames = time * 24
if totalTimeFrames != maxTimeFrames:
porcentaje = float(maxTimeFrames) / float(totalTimeFrames)
durationZoomIn = int(porcentaje * float(durationZoomIn))
durationZoomOut = int(porcentaje * float(durationZoomOut))
for itemAnim in dataAnim:
itemAnim['time'] = int(porcentaje * float(itemAnim['time']))
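# The block above converts the requested running time (in seconds) into frames at the
# 24 fps implied by `maxTimeFrames = time * 24`, then scales every planned segment by
# the same ratio. For example, a 60 second target is 1440 frames; if the planned
# segments add up to 2880 frames, each duration (zoom in/out included) is halved.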
#zoom in
self.showAllPhotosPicZoomIn(picName=picZoomInName, duration=durationZoomIn)
while len(dataAnim) > 0:
itemAnim = random.choice(dataAnim)
if itemAnim['type'] == 'corners':
self.showLinePhotosGroup(duration=itemAnim['time'], picNameStart=itemAnim['start'], picNameEnd=itemAnim['end'], zoom=itemAnim['zoom'], groupStart=self.get4PicsCorner(itemAnim['start']), groupEnd=self.get4PicsCorner(itemAnim['end']))
elif itemAnim['type'] == 'line':
self.showLinePhotosGroup(duration=itemAnim['time'], picNameStart=itemAnim['start'], picNameEnd=itemAnim['end'], zoom=itemAnim['zoom'], groupStart=None, groupEnd=None)
elif itemAnim['type'] == 'one':
self.showDeleiteOnePhoto(duration=itemAnim['time'], picName=itemAnim['pic'])
elif itemAnim['type'] == 'two':
self.showDeleiteTwoPhotos(duration=itemAnim['time'], picName1=itemAnim['pic1'], picName2=itemAnim['pic2'])
if itemAnim in dataAnim:
dataAnim.remove(itemAnim)
#zoom out
self.showAllPhotosPicZoomOut(picName=picZoomOutName, duration=durationZoomOut)
frameEnd = self.frame
#frameEnd = 120
#frameEnd = numPhotos * 120
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def getPhotoMasCercana (self, pivot, listado):
result = None
curDistance = float('inf')
for item in listado:
if item != pivot:
distance = self.getPhotoDistance(pivot, item)
if distance < curDistance:
result = item
curDistance = distance
return result
def getPhotoDistance (self, item1, item2):
result = None
if item1 is not None and item2 is not None:
import bpy
pic1 = bpy.data.objects[item1]
pic2 = bpy.data.objects[item2]
result = math.sqrt(math.pow((pic1.location.x - pic2.location.x), 2) + math.pow((pic1.location.y - pic2.location.y), 2) + math.pow((pic1.location.z - pic2.location.z), 2))
return result
def doAnimSceneDeleiteAllPhotos (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)
'''
for i in range(0, numPhotos):
#startIdx = random.randint(1, numPhotos)
startIdx = i + 1
picName = 'pic' + str(startIdx)
#self.showDeleiteOnePhoto(duration=120, picName=picName)
self.showDeleiteDuration(duration=240, picName=picName)
'''
'''
for i in range(0, numPhotos):
#startIdx = random.randint(1, numPhotos)
startIdx = i + 1
picNameStart = 'pic' + str(startIdx)
for j in range(0, numPhotos):
endIdx = j + 1
picNameEnd = 'pic' + str(endIdx)
if i != j:
self.showLinePhotos(duration=120, picNameStart=picNameStart, picNameEnd=picNameEnd)
'''
'''
for i in range(0, numPhotos):
startIdx = i + 1
picNameStart = 'pic' + str(startIdx)
for j in range(0, numPhotos):
endIdx = j + 1
picNameEnd = 'pic' + str(endIdx)
if i != j:
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
distance = self.distancePic2Line2Pics(picDistance, picNameStart, picNameEnd)
print("start = " + picNameStart + " end = " + picNameEnd + " distance " + str(distance) + " to " + picDistance)
picBorders = self.getExternPictures()
for pic1 in picBorders:
for pic2 in picBorders:
if pic1 != pic2:
pendiente = self.getPendiente2Pics(pic1, pic2)
pendiente = pendiente if pendiente is not None else 'None'
print('pendiente = ' + str(pendiente) + " pics = " + pic1 + "+" + pic2)
picBorders = self.getExternPictures()
print("borders = " + str(picBorders))
for pic1 in picBorders:
print("for " + pic1 + " extremo is = " + str(self.getPicExtremo(pic1)))
#print("for pic2 extremo is = " + str(self.getPicExtremo('pic2')))
'''
'''
picBorders = self.getExternPictures()
for picBorder1 in picBorders:
picExtremo = self.getPicExtremo(picBorder1)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
distance = self.distancePic2Line2Pics(picDistance, picBorder1, picExtremo)
if distance < 0.5:
print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
'''
rest = list()
for i in range(0, numPhotos):
pic = 'pic' + str((i+1))
rest.append(pic)
'''
maxTry3 = 10
picBorders = self.getExternPictures()
while len(rest) > 0 and maxTry3 > 0:
picBorder = None
picExtremo = None
maxTry2 = 10
while maxTry2 > 0:
picBorder = None
maxTry = 10
while maxTry > 0:
picBorder = random.choice(picBorders)
if picBorder in rest:
break
maxTry -= 1
if picBorder is not None:
picExtremo = self.getPicExtremo(picBorder)
if picExtremo in rest:
break
maxTry2 -= 1
if picBorder is not None and picExtremo is not None:
picMiddle = self.getPicMiddle(picBorder, picExtremo)
valid = True if len(picMiddle) <= 0 else False
for itemMiddle in picMiddle:
if itemMiddle in rest:
valid = True
break
if valid:
if picBorder in rest:
rest.remove(picBorder)
if picExtremo in rest:
rest.remove(picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
else:
maxTry3 -= 1
self.showLinePhotos(duration=120, picNameStart=picBorder, picNameEnd=picExtremo)
else:
maxTry3 -= 1
'''
'''
maxTry3 = 10
#picBorders = self.getExternPictures()
picBorders = self.getCornerPictures()
while len(rest) > 0 and maxTry3 > 0:
line = self.selectLinePath(rest, picBorders)
if line is None:
maxTry3 -= 1
else:
print("rest = " + str(rest))
print("line = " + str(line))
self.showLinePhotos(duration=48, picNameStart=line[0], picNameEnd=line[1])
'''
'''
for zoom in range(1, 4):
picCorners = self.getCornerPictures()
for i in range(0, 2):
picCorner = random.choice(picCorners)
picExtremo = self.getPicExtremoCorner(picCorner)
picCorners.remove(picCorner)
picCorners.remove(picExtremo)
self.showLinePhotos(duration=240, picNameStart=picCorner, picNameEnd=picExtremo, zoom=zoom)
'''
self.showAllPhotos(duration=120, zoom=True)
self.showAllPhotos(duration=120, zoom=False)
frameEnd = self.frame
#frameEnd = numPhotos * 120
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def selectLinePath (self, rest, picBorders=None):
result = None
maxTry3 = 10
picBorders = self.getExternPictures() if picBorders is None else picBorders
#print("strssssss" + str(picBorders))
while maxTry3 > 0:
picBorder = None
picExtremo = None
maxTry2 = 10
while maxTry2 > 0:
picBorder = None
maxTry = 10
while maxTry > 0:
picBorder = random.choice(picBorders)
if picBorder in rest:
break
picBorder = None
maxTry -= 1
if picBorder is not None:
picExtremo = self.getPicExtremo(picBorder)
if picExtremo in rest and picExtremo in picBorders:
break
picExtremo = None
maxTry2 -= 1
if picBorder is not None and picExtremo is not None:
picMiddle = self.getPicMiddle(picBorder, picExtremo)
valid = True if len(picMiddle) <= 0 else False
for itemMiddle in picMiddle:
if itemMiddle in rest:
valid = True
break
if valid:
if picBorder in rest:
rest.remove(picBorder)
if picExtremo in rest:
rest.remove(picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
result = (picBorder, picExtremo)
#self.showLinePhotos(duration=120, picNameStart=picBorder, picNameEnd=picExtremo)
break
else:
maxTry3 -= 1
else:
maxTry3 -= 1
return result
def getPicMiddle (self, picStart, picEnd):
result = list()
numPhotos = len(self.slideshow.photos)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
if picDistance != picStart and picDistance != picEnd:
distance = self.distancePic2Line2Pics(picDistance, picStart, picEnd)
if distance < 0.5:
#print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
result.append(picDistance)
return result
def getPicVecinosCorner (self, picStart, picEnd):
result = list()
numPhotos = len(self.slideshow.photos)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
if picDistance != picStart and picDistance != picEnd:
distance = self.distancePic2Line2Pics(picDistance, picStart, picEnd)
#print("start = " + picStart + " end = " + picEnd + " distance " + str(distance) + " to " + picDistance)
if distance > 0.5 and distance < 1.0:
#print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
result.append(picDistance)
return result
def get4PicsCorner(self, picName):
result = list()
result.append(picName)
numPhotos = len(self.slideshow.photos)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
if picDistance != picName:
distance = self.distance2Pics(picDistance, picName)
print("picDistance = " + picDistance + " distance " + str(distance) + " to " + picName)
if distance < 1.65:
#print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
result.append(picDistance)
#print(str(result))
#quit()
return result
def getPicExtremo (self, picName):
result = None
if picName is not None:
picIdx = int(picName[3:]) - 1
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
idxCorner1 = 0
idxCorner2 = (sizeBorder - 1)
idxCorner3 = (numPhotos - 1)
idxCorner4 = (numPhotos - sizeBorder)
div = int(picIdx / sizeBorder)
div1 = int((picIdx + 1) / sizeBorder)
mod = int(picIdx % sizeBorder)
mod1 = int((picIdx + 1) % sizeBorder)
corner = True if (picIdx == idxCorner1 or picIdx == idxCorner2 or picIdx == idxCorner3 or picIdx == idxCorner4) else False
vertical = True if div == 0 or div == (sizeBorder - 1) else False
horizontal = True if mod == 0 or mod1 == 0 else False
'''
print("picIdx = " + str(picIdx))
print("numPhotos = " + str(numPhotos))
print("sizeBorder = " + str(sizeBorder))
print("corner = " + str(corner))
print("vertical = " + str(vertical))
print("horizontal = " + str(horizontal))
print("div = " + str(picIdx / sizeBorder))
print("mod = " + str(picIdx % sizeBorder))
'''
resultIdx = None
if corner:
listCorners = [idxCorner1, idxCorner2, idxCorner3, idxCorner4]
listCorners.remove(picIdx)
resultIdx = random.choice(listCorners)
elif vertical:
resultIdx = picIdx + (numPhotos - sizeBorder) if picIdx < sizeBorder else picIdx - (numPhotos - sizeBorder)
elif horizontal:
resultIdx = picIdx + (sizeBorder - 1) if mod == 0 else picIdx - (sizeBorder - 1)
if resultIdx is not None:
result = 'pic' + str((resultIdx + 1))
return result
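# Worked example (illustration only): in a 4x4 grid (numPhotos=16, sizeBorder=4) the
# corner indices are 0, 3, 15 and 12. A corner jumps to a random different corner, a
# picture on the first or last row jumps to the opposite row (offset 12), and a
# picture on the first or last column jumps to the opposite column (offset 3).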
def getPicExtremoCorner (self, picName):
result = None
if picName is not None:
picIdx = int(picName[3:]) - 1
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
idxCorner1 = 0
idxCorner2 = (sizeBorder - 1)
idxCorner3 = (numPhotos - 1)
idxCorner4 = (numPhotos - sizeBorder)
valid = False
valid = valid or (picIdx == idxCorner1)
valid = valid or (picIdx == idxCorner2)
valid = valid or (picIdx == idxCorner3)
valid = valid or (picIdx == idxCorner4)
if valid:
resultIdx = None
resultIdx = idxCorner1 if picIdx == idxCorner3 else resultIdx
resultIdx = idxCorner3 if picIdx == idxCorner1 else resultIdx
resultIdx = idxCorner2 if picIdx == idxCorner4 else resultIdx
resultIdx = idxCorner4 if picIdx == idxCorner2 else resultIdx
if resultIdx is not None:
result = 'pic' + str((resultIdx + 1))
return result
def getPendiente2Pics (self, picName1, picName2):
result = None
if picName1 is not None and picName2 is not None:
import bpy
pic1 = bpy.data.objects[picName1]
pic2 = bpy.data.objects[picName2]
try:
result = (pic2.location.y - pic1.location.y) / (pic2.location.x - pic1.location.x)
except:
pass
return result
def distancePic2Line2Pics (self, picName, picNameStart, picNameEnd):
result = None
if picName is not None and picNameStart is not None and picNameEnd is not None:
import bpy
pic = bpy.data.objects[picName]
picStart = bpy.data.objects[picNameStart]
picEnd = bpy.data.objects[picNameEnd]
point = (pic.location.x, pic.location.y)
linePoint1 = (picStart.location.x, picStart.location.y)
linePoint2 = (picEnd.location.x, picEnd.location.y)
result = self.distancePoint2Line2P(point, linePoint1, linePoint2)
return result
def distance2Pics (self, picName1, picName2):
result = None
if picName1 is not None and picName2 is not None:
import bpy
pic1 = bpy.data.objects[picName1]
pic2 = bpy.data.objects[picName2]
result = math.sqrt(math.pow((pic2.location.x - pic1.location.x), 2) + math.pow((pic2.location.y - pic1.location.y), 2))
return result
def distancePoint2Line2P (self, point, linePoint1, linePoint2):
result = None
if point is not None and linePoint1 is not None and linePoint2 is not None:
# line: y = mx + b
mPendiente = (linePoint2[1] - linePoint1[1]) / (linePoint2[0] - linePoint1[0])
b = linePoint1[1] - (mPendiente * linePoint1[0])
distance = (math.fabs((mPendiente*point[0]) - point[1] + b)) / (math.sqrt(math.pow(mPendiente, 2) + 1))
result = distance
return result
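# The expression above is the standard point-to-line distance |m*x0 - y0 + b| / sqrt(m^2 + 1).
# Note it assumes the two line points differ in x; a perfectly vertical line would make the
# slope computation divide by zero.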
def getCornerPictures (self):
result = None
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
picCorner1 = 'pic1'
picCorner2 = 'pic' + str(sizeBorder)
picCorner3 = 'pic' + str(numPhotos)
picCorner4 = 'pic' + str(numPhotos - sizeBorder + 1)
result = [picCorner1, picCorner2, picCorner3, picCorner4]
return result
def getExternPictures (self):
result = None
numPhotos = len(self.slideshow.photos)
print("numPhotos = " + str(numPhotos))
sizeBorder = int(math.sqrt(numPhotos))
print("sizeBorder = " + str(sizeBorder))
result = list()
for i in range(0, numPhotos):
col = int(i / sizeBorder)
print("col = " + str(col))
valid = False
if col == 0:
valid = True
elif col == (sizeBorder - 1):
valid = True
elif (i % sizeBorder) == 0 or ((i + 1) % sizeBorder) == 0:
valid = True
if valid:
picName = 'pic' + str((i + 1))
result.append(picName)
return result
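# Illustration: for a 4x4 grid this returns the 12 outer pictures (pic1-pic5, pic8,
# pic9, pic12-pic16); only the inner 2x2 block (pic6, pic7, pic10, pic11) is skipped.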
def doAnimSceneSequential (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)
#print("NUM PHOTOS = " + str(numPhotos))
for i in range(0, numPhotos):
#startIdx = random.randint(1, numPhotos)
startIdx = i + 1
picName = 'pic' + str(startIdx)
self.showSequentialPhoto(picName, duration=120)
#print("EXTERN PICTURES = " + str(self.getExternPictures()))
frameEnd = self.frame
#frameEnd = numPhotos * 120
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def showSequentialPhoto (self, picName, duration=120):
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
pic = bpy.data.objects[picName]
initZ = 2.5
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = pic.location.x + random.uniform(-0.001, 0.001)
cam.location.y = pic.location.y + random.uniform(-0.001, 0.001)
cam.location.z = pic.location.z + initZ + 0.01
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2) - 6)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2) - 6)
cam.location.x = pic.location.x + random.uniform(-0.001, 0.001)
cam.location.y = pic.location.y + random.uniform(-0.001, 0.001)
cam.location.z = pic.location.z + initZ - 0.01
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 6)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 6)
self.frame = self.frame + duration
def doAnimSceneDuration (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)#16
frameEnd = 8 * 120
#renderOneFrame(50)
self.showDeleiteDuration(duration=120)
self.showDeleiteDuration(duration=120)
self.showZoomInOutDuration(duration=120)
self.showZoomInOutDuration(duration=120)
self.showRowColumnDuration(duration=120)
self.showRowColumnDuration(duration=120)
self.showSlideshowDuration(duration=120)
self.showSlideshowDuration(duration=120)
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def doAnimScene (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)#16
pps = 1.0
fps = self.fps
frameEnd = numPhotos * pps * fps
#renderOneFrame(50)
self.showDeleite(numPhotos, frameEnd)
self.showDeleite(numPhotos, frameEnd)
self.showZoomInOut(numPhotos, frameEnd)
self.showZoomInOut(numPhotos, frameEnd)
self.showRowColumn(numPhotos, frameEnd)
self.showRowColumn(numPhotos, frameEnd)
self.showSlideshow(numPhotos, frameEnd)
self.showSlideshow(numPhotos, frameEnd)
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def doAnimSceneTitle (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)#16
pps = 1.0
fps = self.fps
frameEnd = numPhotos * pps * fps
#renderOneFrame(50)
self.showDeleite(numPhotos, frameEnd)
self.showZoomInOut(numPhotos, frameEnd)
self.showDeleite(numPhotos, frameEnd)
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def animSceneDuration (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSceneDuration", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneDuration(folderImages, movieOutput)
return result
def animSceneSequential (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSceneSequential", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneSequential(folderImages, movieOutput)
return result
def animSceneDeleiteAllPhotos (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSceneDeleiteAllPhotos", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneDeleiteAllPhotos(folderImages, movieOutput)
return result
def animSlideshow (self, folderImages, time=None, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty_background.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSlideshow", [folderImages, time], movieOutput=movieOutput)
else:
result = self.doAnimSlideshow(folderImages, time, movieOutput)
return result
def animScene (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animScene", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimScene(folderImages, movieOutput)
return result
def animSceneTitle (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "doAnimSceneTitle", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneTitle(folderImages, movieOutput)
return result
def animSceneTitleItem (self, folderImages, durationFrames=120, mode='project', movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty_background.blend', 'templates')
result = self.runMethodBlender(templatePath, "doAnimSceneTitleItem", [folderImages, durationFrames, mode], movieOutput=movieOutput)
else:
result = self.doAnimSceneTitleItem(folderImages=folderImages, durationFrames=durationFrames, mode=mode, movieOutput=movieOutput)
return result
def doAnimSceneTitleItem (self, folderImages, durationFrames=120, mode='project', movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
#filepath imgBackground
#bpy.context.scene.node_tree.nodes['imgBackground'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background.jpg'
bpy.data.images['background'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background2.jpg'
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
#renderOneFrame(50)
if mode == 'project':
self.showDeleiteOnePhotoProject(durationFrames)
elif mode == 'section':
self.showDeleiteOnePhotoSection(durationFrames)
else:
self.showDeleiteOnePhoto(durationFrames)
result = self.saveMovie(frameStart=1, frameEnd=durationFrames, movieOutput=movieOutput)
return result
if __name__ == '__main__':
director = Director()
director.runMode = 'LOW'
director.verbose = True
director.forceFullRender = True
director.sortPhotos = True
#director.forceFrameEnd = 6
#out = director.animScene("/media/jmramoss/ALMACEN/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2")
#print(str(out))
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/3x2")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSlideshow("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSlideshow("/home/jmramoss/hd/res_slideshow/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2")
out = director.animSlideshow("/media/jmramoss/TOSHIBA EXT13/res_slideshow/unai_colegio_primaria/Tutoria_2A_2018_2019/02/jpg/.bak")
print(str(out))
#director.addBgSound("/media/jmramoss/ALMACEN/mp3/Bruno_Mars_-_24K_Magic_Official_Video[myplaylist-youtubemp3.com].mp3", "metal")
#director.saveMovie(True)
| python |
from collections import defaultdict
import codecs
import csv
import json
by_verb = defaultdict(set)
with codecs.open('data.csv', encoding='utf-8', errors='ignore') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
verbs = [
v.strip()
for v_semi in row['verb'].lower().split(';')
for v in v_semi.split(',')
if v.strip() and v.strip() not in ('na', 'n/a')
]
for v in verbs:
by_verb[v].add(row['reqId'])
req_set = {reqId for reqs in by_verb.values() for reqId in reqs}
nodes = [{'id': v, 'label': v, 'color': 'red'} for v in by_verb]
nodes.extend({'id': req, 'label': req, 'color': 'lightblue'}
for req in req_set)
edges = [{'from': v, 'to': req}
for v, reqs in by_verb.items() for req in reqs]
print("var data = {")
print("nodes: new vis.DataSet({0}),".format(json.dumps(nodes)))
print("edges: new vis.DataSet({0})".format(json.dumps(edges)))
print("};")
| python |
# import numpy as np
#
# ranNUm1 = np.random.random([2,3])
# print(ranNUm1)
# print(type(ranNUm1))#<class 'numpy.ndarray'> 内部元组数据类型必须一直
#
# arrTest = np.arange(32)
# print(arrTest)
# print(arrTest.reshape([4 , 8]))
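# A runnable version of the commented-out experiment above (a sketch; it simply
# confirms that reshape keeps all 32 values while presenting them as 4 rows of 8):
import numpy as np

ranNum1 = np.random.random([2, 3])   # 2x3 array of floats in [0, 1)
print(ranNum1)
print(type(ranNum1))                 # <class 'numpy.ndarray'>

arrTest = np.arange(32)              # 0..31
print(arrTest)
print(arrTest.reshape([4, 8]))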
| python |
class Solution:
def solve(self, digits):
map = {
'2':'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz'
}
output = []
def helper(combination, digit):
if not len(digit):
output.append(combination)
return
d = digit[0]
for letter in map[d]:
helper(combination + letter, digit[1:])
helper("", digits)
return output
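# Example call (not part of the original snippet): digits "23" expand to the
# 3 x 3 = 9 letter combinations below, in the order produced by the recursion.
if __name__ == '__main__':
    print(Solution().solve("23"))
    # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']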
| python |
import random
import pickle
import torch
from torch import nn
class EncDecNetwork(nn.Module):
def __init__(self, encoder, decoder):
super(EncDecNetwork, self).__init__()
self.encoder = encoder
self.decoder = decoder
self._cuda = False
def full_forward(self):
raise NotImplementedError
def translate(self):
raise NotImplementedError
def cuda(self):
super(EncDecNetwork, self).cuda()
self.encoder.cuda()
self.decoder.cuda()
self._cuda = True
def initialize_params(self, init_range):
for p in self.parameters():
p.data.uniform_(-init_range, init_range)
def save_config_data(self, path):
checkpoint_data = self.get_checkpoint_data()
with open(path, 'wb') as f:
pickle.dump(checkpoint_data, f, -1)
def get_checkpoint_data(self):
raise NotImplementedError('get_checkpoint_data should be implemented by class that inherits EncDecNetwork')
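# Minimal usage sketch (an assumption, not from the original project): any pair of
# nn.Module instances can be wrapped, while full_forward/translate/get_checkpoint_data
# are left for subclasses to implement.
if __name__ == '__main__':
    enc = nn.Linear(8, 4)
    dec = nn.Linear(4, 8)
    net = EncDecNetwork(enc, dec)
    net.initialize_params(0.1)  # uniform init in [-0.1, 0.1]
    print(sum(p.numel() for p in net.parameters()))  # 76 parameters in this toy pair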
| python |
from opentrons import protocol_api
from opentrons.types import Point
import json
import os
import math
import threading
from time import sleep
metadata = {'apiLevel': '2.5'}
NUM_SAMPLES = 24
SAMPLE_VOLUME = 475
def run(protocol: protocol_api.ProtocolContext):
source = protocol.load_labware('starlab_96_wellplate_2000ul', 2)
dest = protocol.load_labware('starlab_96_wellplate_2000ul', 3)
tiprack_1 = protocol.load_labware('opentrons_96_filtertiprack_200ul', 6)
m300 = protocol.load_instrument('p300_multi_gen2', 'left', tip_racks=[tiprack_1])
s = source.wells_by_name()['A1']
side = 1
loc = s.bottom(0.8).move(Point(x=side*2.5))  # changed from 0.5 to 0.8 and from 3 to 2.5
d = dest.wells_by_name()['A12']
m300.transfer(SAMPLE_VOLUME, loc, d)
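if __name__ == '__main__':
    # Dry run without a robot (a sketch, not part of the original protocol; the
    # simulate helper ships with the opentrons package and replays run() virtually).
    from opentrons import simulate
    run(simulate.get_protocol_api('2.5'))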
| python |
#!/usr/bin/python
# -*- coding: utf-8 -*-###
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
'''
This script deletes all hosts and services except localhost and the services related to localhost.
'''
import json
import requests
import sys
from time import sleep
def apply_config_and_restart_nagios(nagiosDetails):
retCode = 0
# Actual command :-
# response = requests.post('http://10.188.239.22/nagiosxi/api/v1/system/applyconfig?apikey=WhNXoMABXiR7WMNO3RMN6a34oGPp6TY2qLg8NPY0868k9U9r3be8kgrLVhahq8Da')
URI = "http://" + nagiosDetails["nagiosHost"] + "/nagiosxi/api/v1/system/applyconfig?apikey=" + nagiosDetails["apikey"]
# Apply config URI (used to restart Nagios XI)
print("Restarting nagios after deleting config")
response = requests.post(URI)
retVal = int(response.status_code)
retStat = json.loads(response.text)
#print(retStat)
try:
status = retStat["success"]
print("Restart Nagios with retVal :- " + str(retVal) + " and retStat :- " + str(retStat))
retCode = 0
except:
status = retStat["error"]
print("Restart Nagios with retVal :- " + str(retVal) + " and retStat :- " + str(retStat) + ". Plugin exiting.")
retCode = 1 # Something is wrong.
sleep(1)
sys.exit(1) # Exit for now.
return retCode
def delete_all_services_except_localhost_services(nagiosDetails):
# Get a list of all services to delete them one by one - Do not delete services of localhost
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/objects/servicestatus'
print("Get service list URI = ", URI)
response = requests.get(URI, params=params)
response = json.loads(response.content)
print("Num services - " + str(response["recordcount"]) )
serviceList = response["servicestatus"]
for service in serviceList:
# Do not delete services of localhost
if service["host_name"] == 'localhost':
continue
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
('host_name', service["host_name"]),
('service_description', service["name"]),
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/config/service'
print("Delete service URI = ", URI, "Deleting service - ", service["name"])
#sleep(5)
response = requests.delete(URI, params=params)
sleep(0.1)
return 0
def delete_all_hosts_except_localhost(nagiosDetails):
# Get a list of all hosts to delete them one by one - Do not delete localhost
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/objects/hoststatus'
print("Get host list URI = ", URI)
response = requests.get(URI, params=params)
response = json.loads(response.content)
print("Num hosts - " + str(response["recordcount"]) )
hostList = response["hoststatus"]
# JSON format differs if it is a single entry.
if int(response["recordcount"]) == 1:
print("Not deleting localhost")
return 0
else:
for host in hostList:
# Do not delete localhost
print("Hostname = ", host["name"])
if host["name"] == 'localhost':
continue
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
('host_name', host["name"])
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/config/host'
print("Delete host URI = ", URI, "Deleting host - ", host["name"])
#sleep(5)
response = requests.delete(URI, params=params)
sleep(0.1)
return 0
if __name__ == '__main__':
import sys
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser(add_help=True, description='Usage')
parser.add_argument('-i','--input_file',dest='input_file', required=True,
help='Json file containing oneview and nagios details used for testing main module')
# Check and parse the input arguments into python's format
input = parser.parse_args()
with open(input.input_file) as data_file:
inputConfig = json.load(data_file)
nagiosDetails = inputConfig["nagios_config"]
delete_all_services_except_localhost_services(nagiosDetails)
apply_config_and_restart_nagios(nagiosDetails)
sleep(5)
delete_all_hosts_except_localhost(nagiosDetails)
apply_config_and_restart_nagios(nagiosDetails)
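# Example --input_file contents (inferred from the keys read above; the host and
# apikey values are placeholders, not real credentials):
#
# {
#     "nagios_config": {
#         "nagiosHost": "nagios.example.local",
#         "apikey": "YOUR_NAGIOSXI_API_KEY"
#     }
# }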
| python |