{
"source": "Jopplk/trumpRallies",
"score": 3
} |
#### File: Jopplk/trumpRallies/compileData.py
```python
import pandas as pd
import os
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
PATH_2016 = 'data/2016Campaign/raw/'
PATH_POST = 'data/postElection/raw/'
def collectData(folderPath):
data = pd.DataFrame()
for file in os.listdir(folderPath):
        data = pd.concat([data, pd.read_csv(folderPath + file)], ignore_index=True)
return data
def concatColumns(df, fName):
    # An f-string here would format the whole Series objects rather than row values:
    # df[fName] = [f"{df['Venue']}, {df['City']}, {df['State']}"]
    df[fName] = df['City'] + ', ' + df['State']
data2016 = collectData(PATH_2016)
dataPost = collectData(PATH_POST)
concatColumns(data2016, 'concatAddress')
concatColumns(dataPost, 'concatAddress')
locator = Nominatim(user_agent="Personal geocoding script")
geocoder = RateLimiter(locator.geocode, min_delay_seconds=1)
def geocode_df(df, geocoderObj):
df['location'] = df['concatAddress'].apply(geocoderObj)
df['point'] = df['location'].apply(lambda location: tuple(location.point))
df['finalAddress'] = df['location'].apply(lambda location: location.address)
df[['latitude', 'longitude', 'altitude']] = pd.DataFrame(df['point'].tolist(), index=df.index)
geocode_df(data2016, geocoder)
geocode_df(dataPost, geocoder)
#Special cases ----------------------
def agg(dfFile, column):
data = pd.read_csv(dfFile)
aggData = data.groupby(column)[column].count()
return aggData
def fix(df, index, geoObj):
# [:8] for Bangor, [:10] for Portland
data = geoObj.geocode(df.at[index, 'concatAddress'][:10] + 'Maine')
df.at[index, 'location'] = str(data)
df.at[index, 'point'] = tuple(data.point)
df.at[index, 'finalAddress'] = data.address
df.at[index, 'latitude'] = tuple(data.point)[0]
df.at[index, 'longitude'] = tuple(data.point)[1]
```
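The script above geocodes entire columns at once, which makes individual lookup failures hard to spot. A minimal sketch of checking a single address with the same rate-limited Nominatim geocoder; the sample address is illustrative, not taken from the rally data:
```python
# Quick single-address check; the address string is an arbitrary example.
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter

locator = Nominatim(user_agent="Personal geocoding script")
geocode = RateLimiter(locator.geocode, min_delay_seconds=1)

location = geocode("Portland, Maine")  # returns None if the lookup fails
if location is not None:
    print(location.address, tuple(location.point))  # address plus (lat, lon, altitude)
```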
#### File: Jopplk/trumpRallies/foliumMap.py
```python
import pandas as pd
import folium
from folium.features import Choropleth
import json
def pointMap(filename, kwargs):
foliumMap = folium.Map(**kwargs)
data2016 = pd.read_csv('data/2016Campaign/data2016.csv')
dataPost = pd.read_csv('data/postElection/dataPost.csv')
data2016.apply(lambda row: folium.CircleMarker(
(row['latitude'], row['longitude']),
radius=4,
weight=.5,
color='blue',
fill_color='blue',
fill_opacity='.25').add_to(foliumMap), axis=1)
dataPost.apply(lambda row: folium.CircleMarker(
(row['latitude'], row['longitude']),
radius=4,
weight=.5,
color='red',
fill_color='red',
fill_opacity='.25').add_to(foliumMap), axis=1)
foliumMap.save(filename)
def choroplethMap(filename, kwargs):
def agg(dfFile):
data = pd.read_csv(dfFile)
aggData = data.groupby('State')['Venue'].count()
return aggData
foliumMap = folium.Map(**kwargs)
data2016agg = agg('data/2016Campaign/data2016.csv')
dataPostagg = agg('data/postElection/dataPost.csv')
bothAgg = data2016agg.combine(
dataPostagg, lambda x, y: x + y, fill_value=0)
with open('data/us-states.json') as f:
states = json.load(f)
Choropleth(geo_data=states,
# data=bothAggdf, columns=['State', 'Venue'],
data=bothAgg,
key_on='feature.properties.name',
fill_color='YlGn',
fill_opacity=.6,
legend_name='Number of Rallies'
).add_to(foliumMap)
folium.LayerControl().add_to(foliumMap)
foliumMap.save(filename)
mapSettings = dict(
tiles='cartodbpositron',
location=[39.8283, -98.5795],
zoom_start=4.75)
pointMap('pointF.html', mapSettings)
choroplethMap('choroF.html', mapSettings)
``` |
{
"source": "joprice/mongo",
"score": 2
} |
#### File: python/wiredtiger/intpacking.py
```python
import math, struct
# Variable-length integer packing
# need: up to 64 bits, both signed and unsigned
#
# Try hard for small values (up to ~2 bytes), after that, just encode the
# length in the first byte.
#
# First byte | Next | |
# byte | bytes| Min Value | Max Value
# ------------+------+------------------------+--------------------------------
# [00 00xxxx] | free | N/A | N/A
# [00 01llll] | 8-l | -2^64 | -2^13 - 2^6
# [00 1xxxxx] | 1 | -2^13 - 2^6 | -2^6 - 1
# [01 xxxxxx] | 0 | -2^6 | -1
# [10 xxxxxx] | 0 | 0 | 2^6 - 1
# [11 0xxxxx] | 1 | 2^6 | 2^13 + 2^6 - 1
# [11 10llll] | l | 2^14 + 2^7 | 2^64 - 1
# [11 11xxxx] | free | N/A | N/A
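# Worked example (illustrative): pack_int(100) takes the POS_2BYTE branch below,
# since 63 < 100 <= 8255. With x = 100 - (POS_1BYTE_MAX + 1) = 36 it emits
# chr(0xc0 | (36 >> 8)) + chr(36 & 0xff) == '\xc0\x24', and unpack_int recovers
# POS_1BYTE_MAX + 1 + 0x24 == 100.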
NEG_MULTI_MARKER = 0x10
NEG_2BYTE_MARKER = 0x20
NEG_1BYTE_MARKER = 0x40
POS_1BYTE_MARKER = 0x80
POS_2BYTE_MARKER = 0xc0
POS_MULTI_MARKER = 0xe0
NEG_1BYTE_MIN = -2**6
NEG_2BYTE_MIN = -2**13 + NEG_1BYTE_MIN
POS_1BYTE_MAX = 2**6 - 1
POS_2BYTE_MAX = 2**13 + POS_1BYTE_MAX
MINUS_BIT = -1 << 64
UINT64_MASK = 0xffffffffffffffff
def getbits(x, start, end=0):
'''return the least significant bits of x, from start to end'''
return (x & ((1 << start) - 1)) >> (end)
def get_int(b, size):
r = 0;
for i in xrange(size):
r = (r << 8) | ord(b[i])
return r
def pack_int(x):
if x < NEG_2BYTE_MIN:
packed = struct.pack('>Q', x & UINT64_MASK)
while packed and packed[0] == '\xff':
packed = packed[1:]
return chr(NEG_MULTI_MARKER | getbits(8 - len(packed), 4)) + packed
elif x < NEG_1BYTE_MIN:
x -= NEG_2BYTE_MIN
return chr(NEG_2BYTE_MARKER | getbits(x, 13, 8)) + chr(getbits(x, 8))
elif x < 0:
x -= NEG_1BYTE_MIN
return chr(NEG_1BYTE_MARKER | getbits(x, 6))
elif x <= POS_1BYTE_MAX:
return chr(POS_1BYTE_MARKER | getbits(x, 6))
elif x <= POS_2BYTE_MAX:
x -= (POS_1BYTE_MAX + 1)
return chr(POS_2BYTE_MARKER | getbits(x, 13, 8)) + chr(getbits(x, 8))
else:
packed = struct.pack('>Q', x - (POS_2BYTE_MAX + 1))
while packed and packed[0] == '\x00':
packed = packed[1:]
return chr(POS_MULTI_MARKER | getbits(len(packed), 4)) + packed
def unpack_int(b):
marker = ord(b[0])
if marker < NEG_2BYTE_MARKER:
sz = 8 - getbits(marker, 4)
return ((-1 << (sz << 3)) | get_int(b[1:], sz), b[sz+1:])
elif marker < NEG_1BYTE_MARKER:
return (NEG_2BYTE_MIN + ((getbits(marker, 5) << 8) | ord(b[1])), b[2:])
elif marker < POS_1BYTE_MARKER:
return (NEG_1BYTE_MIN + getbits(marker, 6), b[1:])
elif marker < POS_2BYTE_MARKER:
return (getbits(marker, 6), b[1:])
elif marker < POS_MULTI_MARKER:
return (POS_1BYTE_MAX + 1 + ((getbits(marker, 5) << 8) | ord(b[1])), b[2:])
else:
sz = getbits(marker, 4)
return (POS_2BYTE_MAX + 1 + get_int(b[1:], sz), b[sz+1:])
# Sanity testing
if __name__ == '__main__':
import random
for big in (100, 10000, 1 << 40, 1 << 64):
for i in xrange(1000):
r = random.randint(-big, big)
print "\rChecking %d" % r,
if unpack_int(pack_int(r))[0] != r:
print "\nFound a problem with %d" % r
break
print
for i in xrange(1000):
r1 = random.randint(-big, big)
r2 = random.randint(-big, big)
print "\rChecking %d, %d" % (r1, r2),
if cmp(r1, r2) != cmp(pack_int(r1), pack_int(r2)):
print "\nFound a problem with %d, %d" % (r1, r2)
break
print
``` |
{
"source": "joprice/rules_reason",
"score": 2
} |
#### File: private/ocaml/compile.bzl
```python
load(
"//reason/private:extensions.bzl",
"CMXA_EXT",
"CMI_EXT",
"CMO_EXT",
"CMX_EXT",
"C_EXT",
"H_EXT",
"MLI_EXT",
"ML_EXT",
"O_EXT",
)
load(
"//reason/private:providers.bzl",
"MlCompiledModule",
)
load(
":utils.bzl",
"TARGET_BYTECODE",
"TARGET_NATIVE",
"select_compiler",
)
def ocaml_compile_library(
ctx,
arguments,
c_sources,
ml_sources,
outputs,
runfiles,
sorted_sources,
toolchain,
):
"""
Compile a given set of OCaml .ml and .mli sources to their .cmo, .cmi, and
.cmx counterparts.
"""
ctx.actions.run_shell(
inputs=runfiles,
outputs=outputs,
tools=[
toolchain.ocamlc,
toolchain.ocamlopt,
],
command="""\
#!/bin/bash
# Compile .cmi and .cmo files
{_ocamlc} {arguments} $(cat {ml_sources})
# Compile .cmx files
{_ocamlopt} {arguments} $(cat {ml_sources}) {c_sources}
mkdir -p {output_dir}
# C sources will be compiled and put at the top level
find . -maxdepth 1 \
-name "*.o" \
-exec cp {{}} {output_dir}/ \;
find {source_dir} \
-name "*.cm*" \
-exec cp {{}} {output_dir}/ \;
find {source_dir} \
-name "*.o" \
-exec cp {{}} {output_dir}/ \;
cp -f $(cat {ml_sources}) {output_dir}/;
""".format(
_ocamlc=toolchain.ocamlc.path,
_ocamlopt=toolchain.ocamlopt.path,
arguments=" ".join(arguments),
c_sources=" ".join([c.path for c in c_sources]),
ml_sources=sorted_sources.path,
output_dir=outputs[0].dirname,
source_dir=ml_sources[0].dirname,
),
mnemonic="OCamlCompileLib",
progress_message="Compiling ({_in}) to ({out})".format(
_in=", ".join([s.basename for s in ml_sources] +
[c.basename for c in c_sources]),
out=", ".join([s.basename for s in outputs]),
),
)
def ocaml_compile_binary(
ctx,
arguments,
base_libs,
binfile,
c_deps,
c_sources,
deps,
ml_sources,
runfiles,
sorted_sources,
target,
toolchain,
):
"""
Compile a given set of OCaml .ml and .mli sources to a single binary file
Args:
ctx: the context argument from the rule invoking this macro
arguments: a list of string representing the compiler flags
base_libs: a list of target objects from the OCaml stdlib to link against
binfile: the binary file target
c_deps: a list of transitive C dependency targets
c_sources: depset of C sources for this binary
deps: a list of transitive ML dependency targets
ml_sources: a depset of ML sources for this binary
runfiles: list of all the files that need to be present at runtime
sorted_sources: a file target with ML sources in topological order
target: whether to compile to a native or bytecode binary
toolchain: the OCaml toolchain
"""
compiler = select_compiler(toolchain, target)
# Native binaries expect .cmx files while bytecode binaries expect .cmo
expected_object_ext = CMX_EXT
if target == TARGET_BYTECODE:
expected_object_ext = CMO_EXT
dep_libs = []
for d in deps:
name = d.basename
if ML_EXT in name or MLI_EXT in name:
dep_libs.extend([d])
# Extract all .cmxa baselib dependencies to include in linking
stdlib_libs = []
for baselib in base_libs:
if CMXA_EXT in baselib.basename:
stdlib_libs += [baselib]
ctx.actions.run_shell(
inputs=runfiles,
outputs=[binfile],
tools=[
toolchain.ocamlc,
toolchain.ocamlopt,
toolchain.ocamldep,
],
command="""\
#!/bin/bash
# Run ocamldep on all of the ml and mli dependencies for this binary
{_ocamldep} \
-sort \
$(echo {dep_libs} | tr " " "\n" | grep ".ml*") \
> .depend.all
# Extract only the compiled cmx files to use as input for the compiler
cat .depend.all \
| tr " " "\n" \
| grep ".ml$" \
| sed "s/\.ml.*$/{expected_object_ext}/g" \
| xargs \
> .depend.cmx
{_compiler} {arguments} \
{c_objs} \
{base_libs} \
$(cat .depend.cmx) $(cat {ml_sources}) {c_sources}
mkdir -p {output_dir}
find {source_dir} -name "{pattern}" -exec cp {{}} {output_dir}/ \;
""".format(
_compiler=compiler.path,
_ocamldep=toolchain.ocamldep.path,
arguments=" ".join(arguments),
base_libs=" ".join([b.path for b in stdlib_libs]),
c_objs=" ".join([o.path for o in c_deps]),
c_sources=" ".join([c.path for c in c_sources]),
expected_object_ext=expected_object_ext,
dep_libs=" ".join([l.path for l in dep_libs]),
ml_sources=sorted_sources.path,
output_dir=binfile.dirname,
pattern=binfile.basename,
source_dir=ml_sources[0].dirname,
),
mnemonic="OCamlCompileBin",
progress_message="Compiling ({_in}) to ({out})".format(
_in=", ".join([s.basename for s in ml_sources] +
[c.basename for c in c_sources]),
out=binfile.basename),
)
```
#### File: private/ocaml/ocaml_module.bzl
```python
load(
"//reason/private:extensions.bzl",
"CM_EXTS",
"CMA_EXT",
"CMXA_EXT",
"MLI_EXT",
"ML_EXT",
)
load(
"//reason/private:providers.bzl",
"MlCompiledModule",
"CCompiledModule",
)
load(
":ocamldep.bzl",
_ocamldep="ocamldep",
)
load(
":utils.bzl",
_build_import_paths="build_import_paths",
_declare_outputs="declare_outputs",
_find_base_libs="find_base_libs",
_gather_files="gather_files",
_group_sources_by_language="group_sources_by_language",
_stdlib="stdlib",
)
load(
":compile.bzl",
_ocaml_compile_library="ocaml_compile_library",
)
def _ocaml_module_impl(ctx):
name = ctx.attr.name
toolchain = ctx.attr.toolchain[platform_common.ToolchainInfo]
# Get standard library files and path
(stdlib, stdlib_path) = _stdlib(toolchain)
base_libs = _find_base_libs(stdlib, ctx.attr.base_libs)
# Get all sources needed for compilation
(sources, imports, deps, c_deps, stdlib_deps) = _gather_files(ctx)
# Split sources for sorting
(ml_sources, c_sources) = _group_sources_by_language(sources)
# Run ocamldep on the ML sources to compile in right order
sorted_sources = _ocamldep(ctx, name, ml_sources, toolchain)
# Declare outputs
(ml_outputs, c_outputs) = _declare_outputs(ctx, sources)
outputs = ml_outputs + c_outputs
# Build runfiles
runfiles = []
runfiles.extend([sorted_sources])
runfiles.extend(sources)
runfiles.extend(deps)
runfiles.extend(stdlib)
# Compute import paths
import_paths = _build_import_paths(imports, stdlib_path)
arguments = ["-color", "always"] + import_paths + ["-c"]
_ocaml_compile_library(
ctx=ctx,
arguments=arguments,
outputs=outputs,
runfiles=runfiles,
sorted_sources=sorted_sources,
ml_sources=ml_sources,
c_sources=c_sources,
toolchain=toolchain,
)
return [
DefaultInfo(
files=depset(outputs),
runfiles=ctx.runfiles(files=runfiles),
),
MlCompiledModule(
name=ctx.attr.name,
srcs=ml_sources,
deps=deps,
base_libs=base_libs,
outs=ml_outputs,
),
CCompiledModule(
name=ctx.attr.name,
srcs=c_sources,
outs=c_outputs,
),
]
ocaml_module = rule(
attrs={
"srcs":
attr.label_list(
allow_files=[ML_EXT, MLI_EXT],
mandatory=True,
),
"deps":
attr.label_list(
allow_files=False,
default=[],
),
"base_libs":
attr.string_list(default=[]),
"toolchain":
attr.label(
# TODO(@ostera): rename this target to managed-platform
default="//reason/toolchain:bs-platform",
providers=[platform_common.ToolchainInfo],
),
},
implementation=_ocaml_module_impl,
)
```
#### File: private/opam/init.bzl
```python
def _ocamlrun(ctx):
executable = ctx.actions.declare_file(ctx.attr.name)
bytecode = ctx.file.src
ocamlrun = ctx.file._ocamlrun
template = ctx.file._runscript
ctx.actions.expand_template(
template=template,
output=executable,
substitutions={
"{ocamlrun}": ocamlrun.path,
"{bytecode}": bytecode.path,
},
is_executable=True,
)
runfiles = [ocamlrun, bytecode]
return [
DefaultInfo(
runfiles=ctx.runfiles(files=runfiles),
executable=executable,
),
]
ocamlrun = rule(
attrs={
"src":
attr.label(
allow_single_file=True,
mandatory=True,
),
"_ocamlrun":
attr.label(
default="//reason/private/opam:ocamlrun",
allow_single_file=True,
),
"_runscript":
attr.label(
default="//reason/private/opam:ocamlrun.tpl",
allow_single_file=True,
),
},
implementation=_ocamlrun,
executable=True)
def init_opam(ocaml_version="4.02.3+buckle-master"):
"""
Macro to initialize opam with the given OCaml version and extract the necessary
binaries and archives for the toolchain.
Args:
ocaml_version (string): a valid ocaml version, installable with opam
"""
native.genrule(
name="init_opam",
srcs=["@opam"],
outs=["opam_root.tar"],
cmd="""\
#!/bin/bash
# compute this package's root directory
pkg_root=$$(dirname $(location :opam_root.tar))
abs_pkg_root=$$(pwd)/$$pkg_root
opam=$(location @opam//:opam)
# make sure the path is good
mkdir -p $$abs_pkg_root;
# initialize opam
OPAMROOT=$$abs_pkg_root $$opam init --comp {ocaml_version};
# package the opam root
tar --transform "s=$$pkg_root/==g" \
--create $$pkg_root \
--dereference \
> $(location :opam_root.tar);
""".format(ocaml_version=ocaml_version),
)
native.genrule(
name="extract_binaries",
srcs=[":opam_root.tar"],
outs=[
"ocaml_stdlib.tar",
"ocamlc.byte",
"ocamldep.byte",
"ocamlopt.byte",
"ocamlrun",
],
cmd="""\
#!/bin/bash
tar --extract \
--file $(location :opam_root.tar) \
--directory $(@D);
ocaml_root=$(@D)/{ocaml_version}
abs_ocaml_root=$$(pwd)/$$ocaml_root
cp -f $$abs_ocaml_root/bin/ocamlc $(@D)/ocamlc.byte;
cp -f $$abs_ocaml_root/bin/ocamldep $(@D)/ocamldep.byte;
cp -f $$abs_ocaml_root/bin/ocamlopt $(@D)/ocamlopt.byte;
cp -f $$abs_ocaml_root/bin/ocamlrun $(@D)/ocamlrun;
# pack ml stdlib preserving paths
tar --transform "s=$$ocaml_root/==g" \
--create $$ocaml_root/lib/* \
--dereference \
> $(location ocaml_stdlib.tar);
""".format(ocaml_version=ocaml_version),
)
```
#### File: 3rdparty/opam/deps.bzl
```python
load(
"@com_github_ostera_rules_reason//reason:def.bzl",
"opam_package",
)
def declare_opam(dep):
opam_package(
archive=dep["archive"],
name="opam.%s" % dep["name"],
pkg_name=dep["pkg_name"],
pkg_version=dep["pkg_version"],
sha256=dep["sha256"],
type=dep["type"],
)
def deps():
return [
{
"name":
"bigstringaf",
"archive":
"https://github.com/inhabitedtype/bigstringaf/archive/0.2.0.tar.gz",
"deps": [],
"pkg_name":
"bigstringaf",
"pkg_version":
"0.2.0",
"sha256":
"98102997fbb3acc8f70fbfb4fb864a5bcc8964ab605d115307f1e6c49334fac8",
"type":
"tar.gz",
},
{
"name":
"angstrom",
"archive":
"https://github.com/inhabitedtype/angstrom/archive/0.10.0.tar.gz",
"deps": ["result", "bigstringaf"],
"pkg_name":
"angstrom",
"pkg_version":
"0.10.0",
"sha256":
"d73384483e8a2d9c6665acf0a4d6fa09e35075da0692e10183cb5589e1c9cf50",
"type":
"tar.gz",
},
{
"name":
"cmdliner",
"archive":
"http://erratique.ch/software/cmdliner/releases/cmdliner-1.0.2.tbz",
"deps": ["result"],
"pkg_name":
"cmdliner",
"pkg_version":
"1.0.2",
"sha256":
"414ea2418fca339590abb3c18b95e7715c1086a1f7a32713a492ba1825bc58a2",
"type":
"tar.bz2",
},
]
def declare_dependencies(rule=declare_opam):
for d in deps():
rule(d)
``` |
{
"source": "joprice/Weaver",
"score": 2
} |
#### File: Weaver/ponyd/downloader.py
```python
from cStringIO import StringIO
from contextlib import contextmanager
import os
import shutil
import tempfile
import urllib2
import zipfile
from ponyd.argbase import Arg
from ponyd.command import PonydCommand
from ponyd.constants import DEFAULT_DEVTOOLS_PATH
@contextmanager
def tempdir():
"""Makes a temp directory then deletes it when leaving the context"""
dir = tempfile.mkdtemp()
try:
yield dir
finally:
shutil.rmtree(dir)
LATEST_URL = "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2FLAST_CHANGE?alt=media"
TOOLS_URL_TEMPLATE = "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2F{}%2Fchrome-linux.zip?alt=media"
INSPECTOR_PATH_PREFIX = 'chrome-linux/resources/inspector'
class Downloader(PonydCommand):
__subcommand__ = 'update-devtools'
dirname = Arg(nargs='?',
help='path to download and extract devtools (default: %s)' % DEFAULT_DEVTOOLS_PATH,
default=DEFAULT_DEVTOOLS_PATH)
latest = Arg('-l', '--latest',
                 help='install the latest dev tools instead of a known good version',
action='store_true')
def __call__(self):
if self.latest:
version = urllib2.urlopen(LATEST_URL).read()
else:
# Protocol: https://chromium.googlesource.com/chromium/src/+/6f91eb9692b1fbb8839a0dbf9b0f3c5b10117b62/third_party/WebKit/Source/core/inspector/browser_protocol.json
version = 464644
tools_url = TOOLS_URL_TEMPLATE.format(version)
print "Downloading %s" % tools_url
tools_stream = StringIO(urllib2.urlopen(tools_url).read())
if os.path.exists(self.dirname):
print "Removing existing devtools installation at %s" % self.dirname
shutil.rmtree(self.dirname)
extract_dir = self.dirname
print "Extracting to %s" % extract_dir
tools_zip = zipfile.ZipFile(tools_stream, 'r')
names_to_extract = [n for n in tools_zip.namelist() if n.startswith(INSPECTOR_PATH_PREFIX)]
with tempdir() as d:
tools_zip.extractall(path=d, members=names_to_extract)
os.rename(os.path.join(d, INSPECTOR_PATH_PREFIX), extract_dir)
``` |
{
"source": "JoProvost/calbum",
"score": 2
} |
#### File: calbum/filters/album.py
```python
from calbum.core import model
from calbum.filters import MediaFilter
class CalendarAlbumFilter(MediaFilter):
def __init__(self, albums_path, events, save_events):
self.events = list(events)
self.albums_path = albums_path
self.save_events = save_events
def albums_for(self, media):
for event in self.events:
if media.timestamp() in event.time_period():
yield (model.Album.from_event(event, self.albums_path),
event)
def move(self, media):
album, event = next(self.albums_for(media), (None, None))
if album:
album.timeline().move(media)
if self.save_events:
event.save_to(album.path())
def link(self, media):
for album, event in self.albums_for(media):
album.timeline().link(media)
if self.save_events:
event.save_to(album.path())
```
#### File: tests/core/test_model.py
```python
from datetime import datetime
import unittest
from dateutil import tz
from hamcrest import assert_that, is_
import mock
from calbum.core import model
class TestFileSystemElement(unittest.TestCase):
@mock.patch('os.renames')
@mock.patch('os.remove')
@mock.patch('os.path.exists')
@mock.patch('calbum.core.model.get_destination_path')
def test_move_to(self, get_destination_path, exists, remove, renames):
get_destination_path.return_value = 'dest/path.jpg'
exists.return_value = False
fse = model.FileSystemElement('origin/path.jpg')
fse.move_to('dest/path')
assert_that(fse.path(), is_('dest/path.jpg'))
get_destination_path.assert_called_with(
source='origin/path.jpg', dest='dest/path', extension='.jpg')
exists.assert_called_with('dest/path.jpg')
renames.assert_called_with('origin/path.jpg', 'dest/path.jpg')
assert_that(remove.called, is_(False))
@mock.patch('os.renames')
@mock.patch('os.remove')
@mock.patch('os.path.exists')
@mock.patch('calbum.core.model.get_destination_path')
def test_move_to_same_file(self, get_destination_path, exists, remove, renames):
get_destination_path.return_value = 'dest/path.jpg'
exists.return_value = True
fse = model.FileSystemElement('origin/path.jpg')
fse.move_to('dest/path')
assert_that(fse.path(), is_('dest/path.jpg'))
get_destination_path.assert_called_with(
source='origin/path.jpg', dest='dest/path', extension='.jpg')
exists.assert_called_with('dest/path.jpg')
remove.assert_called_with('origin/path.jpg')
assert_that(renames.called, is_(False))
@mock.patch('os.link')
@mock.patch('os.makedirs')
@mock.patch('os.remove')
@mock.patch('os.path.exists')
@mock.patch('calbum.core.model.get_destination_path')
def test_link_to(self, get_destination_path, exists, remove, makedirs, link):
get_destination_path.return_value = 'dest/path.jpg'
exists.return_value = False
fse = model.FileSystemElement('origin/path.jpg')
fse.link_to('dest/path')
assert_that(fse.path(), is_('origin/path.jpg'))
get_destination_path.assert_called_with(
source='origin/path.jpg', dest='dest/path', extension='.jpg')
exists.assert_has_calls([
mock.call('dest/path.jpg'),
mock.call('dest'),
])
makedirs.assert_called_with('dest')
link.assert_called_with('origin/path.jpg', 'dest/path.jpg')
assert_that(remove.called, is_(False))
@mock.patch('os.link')
@mock.patch('os.remove')
@mock.patch('os.path.exists')
@mock.patch('calbum.core.model.get_destination_path')
def test_link_to_same_file(self, get_destination_path, exists, remove, link):
get_destination_path.return_value = 'dest/path.jpg'
exists.return_value = True
fse = model.FileSystemElement('origin/path.jpg')
fse.link_to('dest/path')
assert_that(fse.path(), is_('origin/path.jpg'))
get_destination_path.assert_called_with(
source='origin/path.jpg', dest='dest/path', extension='.jpg')
exists.assert_called_with('dest/path.jpg')
assert_that(link.called, is_(False))
assert_that(remove.called, is_(False))
@mock.patch('os.symlink')
@mock.patch('os.link')
@mock.patch('os.makedirs')
@mock.patch('os.remove')
@mock.patch('os.path.exists')
@mock.patch('calbum.core.model.get_destination_path')
def test_link_to_make_symlink_on_error(self, get_destination_path, exists, remove, makedirs, link, symlink):
get_destination_path.return_value = 'dest/new_name.jpg'
exists.return_value = False
link.side_effect = OSError()
fse = model.FileSystemElement('\xc3\x80 trier/origin/path.jpg')
fse.link_to('dest/new_name')
assert_that(fse.path(), is_(u'\xc0 trier/origin/path.jpg'))
get_destination_path.assert_called_with(
source=u'\xc0 trier/origin/path.jpg', dest='dest/new_name', extension='.jpg')
exists.assert_has_calls([
mock.call('dest/new_name.jpg'),
mock.call('dest'),
])
makedirs.assert_called_with('dest')
link.assert_called_with(u'\xc0 trier/origin/path.jpg', 'dest/new_name.jpg')
symlink.assert_called_with(u'../\xc0 trier/origin/path.jpg', 'dest/new_name.jpg')
assert_that(remove.called, is_(False))
def test_file_extension_based_on_path(self):
fse = model.FileSystemElement('some/file/path.pdf')
assert_that(fse.file_extension(), is_('.pdf'))
class TestGetDestinationPath(unittest.TestCase):
@mock.patch('os.path.exists')
def test_get_destination_path_file_doesnt_exist(self, exists):
exists.return_value = False
assert_that(
model.get_destination_path(
source='origin/path.jpg', dest='dest/path', extension='.jpg'),
is_('dest/path.jpg'))
exists.assert_called_with('dest/path.jpg')
@mock.patch('filecmp.cmp')
@mock.patch('os.path.samefile')
@mock.patch('os.path.exists')
def test_get_destination_path_file_exist_with_different_file(self, exists, samefile, filecmp):
samefile.return_value = False
filecmp.return_value = False
exists.side_effect = [
True,
False
]
assert_that(
model.get_destination_path(
source='origin/path.jpg', dest='dest/path', extension='.jpg'),
is_('dest/path(1).jpg'))
exists.has_calls([
mock.call('dest/path.jpg'),
mock.call('dest/path(1).jpg'),
])
@mock.patch('filecmp.cmp')
@mock.patch('os.path.samefile')
@mock.patch('os.path.exists')
def test_get_destination_path_file_exist_with_different_file_twice(self, exists, samefile, filecmp):
samefile.return_value = False
filecmp.return_value = False
exists.side_effect = [
True,
True,
False
]
assert_that(
model.get_destination_path(
source='origin/path.jpg', dest='dest/path', extension='.jpg'),
is_('dest/path(2).jpg'))
exists.has_calls([
mock.call('dest/path.jpg'),
mock.call('dest/path(1).jpg'),
mock.call('dest/path(2).jpg'),
])
@mock.patch('filecmp.cmp')
@mock.patch('os.path.samefile')
@mock.patch('os.path.exists')
def test_get_destination_path_file_exist_with_same_file_inode(self, exists, samefile, filecmp):
samefile.return_value = True
filecmp.return_value = False
exists.side_effect = [
True
]
assert_that(
model.get_destination_path(
source='origin/path.jpg', dest='dest/path', extension='.jpg'),
is_('dest/path.jpg'))
exists.has_calls([
mock.call('dest/path.jpg'),
])
@mock.patch('filecmp.cmp')
@mock.patch('os.path.samefile')
@mock.patch('os.path.exists')
def test_get_destination_path_file_exist_with_same_file_content(self, exists, samefile, filecmp):
samefile.return_value = False
filecmp.return_value = True
exists.side_effect = [
True
]
assert_that(
model.get_destination_path(
source='origin/path.jpg', dest='dest/path', extension='.jpg'),
is_('dest/path.jpg'))
exists.has_calls([
mock.call('dest/path.jpg'),
])
class TestMedia(unittest.TestCase):
def test_file_extension_based_on_path(self):
fse = model.Media('some/file/path.pdf')
assert_that(fse.file_extension(), is_('.pdf'))
def test_file_extension_based_on_class(self):
class FakeGifMedia(model.Media):
file_extensions = ('.gif', '.gifa')
fse = FakeGifMedia('some/file/path.pdf')
assert_that(fse.file_extension(), is_('.gif'))
def test_timestamp_based_on_filename(self):
media = model.Media('some/file/VID_20120501_224323.avi')
assert_that(
media.timestamp(),
is_(datetime(2012, 5, 1, 22, 43, 23, tzinfo=tz.gettz())))
```
#### File: tests/sources/test_exiftool.py
```python
from datetime import datetime
import unittest
from dateutil import tz
from hamcrest import assert_that, is_, instance_of
from calbum.sources import exiftool
from tests import resources
class TestExifToolMedia(unittest.TestCase):
def setUp(self):
self.exiftool_path = exiftool.exiftool_path
def tearDown(self):
exiftool.exiftool_path = self.exiftool_path
def test_jpeg_timestamp(self):
assert_that(
exiftool.JpegPicture(resources.file_path('image-01.jpeg')).timestamp(),
is_(datetime(2012, 5, 1, 1, 0, 0, tzinfo=tz.gettz())))
def test_tiff_timestamp(self):
assert_that(
exiftool.TiffPicture(resources.file_path('image-03.tif')).timestamp(),
is_(datetime(2013, 2, 1, 3, 0, 0, tzinfo=tz.gettz())))
def test_mp4_timestamp(self):
assert_that(
exiftool.VideoMP4Media(resources.file_path('video-01.mp4')).timestamp(),
is_(datetime(2014, 1, 1, 19, 30, 0, tzinfo=tz.gettz())))
def test_3gp_timestamp(self):
assert_that(
exiftool.Video3GPMedia(resources.file_path('video-02.3gp')).timestamp(),
is_(datetime(2014, 2, 2, 19, 30, 0, tzinfo=tz.gettz())))
def test_mp4_timestamp_without_exiftool(self):
try:
exiftool.ExifToolMedia.exiftool_path = '__invalid_command__'
assert_that(
exiftool.VideoMP4Media(resources.file_path('video-01.mp4')).timestamp(),
instance_of(datetime))
except Exception as e:
raise AssertionError('Expected no exception but raised {}'.format(e))
``` |
{
"source": "joqueka/bf4py",
"score": 3
} |
#### File: bf4py/bf4py/derivatives.py
```python
from ._utils import _data_request, _search_request
from datetime import date, datetime, timezone, time
def trade_history(search_date:date):
"""
Returns the times/sales list of every traded derivative for given day.
    Works for a wide range of dates; however, instrument details become sparser the further back in history you go.
Parameters
----------
search_date : date
Date for which derivative trades should be received.
Returns
-------
    tradelist : list
A list of dicts with details about trade and instrument.
"""
CHUNK_SIZE = 1000
i = 0
maxCount = CHUNK_SIZE + 1
params = {'from': datetime.combine(search_date, time(8,0,0)).astimezone(timezone.utc).isoformat().replace('+00:00','Z'),
'to': datetime.combine(search_date, time(22,0,0)).astimezone(timezone.utc).isoformat().replace('+00:00','Z'),
'limit': CHUNK_SIZE,
'offset': 0,
'includePricesWithoutTurnover': False}
tradelist = []
while i * CHUNK_SIZE < maxCount:
params['offset'] = i * CHUNK_SIZE
data = _data_request('derivatives_trade_history', params)
maxCount = data['totalElements']
tradelist += data['data']
i += 1
return tradelist
def instrument_data(isin:str):
"""
Returns all information about given derivative ISIN.
Parameters
----------
isin : str
        ISIN of a valid derivative.
Returns
-------
    data : dict
Dict with information.
"""
params = {'isin': isin}
data = _data_request('derivatives_master_data', params)
return data
def search_criteria():
"""
Returns all multi-option criteria lists for derivatives search (not implemented yet)
Returns
-------
    data : dict
Dict.
"""
params = {'lang': 'de',
'offset': 0,
'limit': 0,
'types': []}
data = _search_request('derivative_search_criteria_data', params)
return data
``` |
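A hedged usage sketch for the two request helpers above, assuming the package is importable as `bf4py`; the date is arbitrary and the ISIN is a placeholder, not a real instrument:
```python
# Illustrative only: the date is arbitrary and the ISIN below is a placeholder.
from datetime import date
from bf4py import derivatives

trades = derivatives.trade_history(date(2022, 1, 3))  # paginated internally in chunks of 1000
print(len(trades), "derivative trades")

details = derivatives.instrument_data("DE000XXXXXXX0")  # placeholder ISIN
print(details)
```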
{
"source": "jo-qzy/AutoRobot",
"score": 2
} |
#### File: jo-qzy/AutoRobot/service.py
```python
import json
import requests
import socket
import time
import datetime
import redis
from apscheduler.schedulers.background import BackgroundScheduler
#from multiprocess import Process
def serviceServer(server):
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = 10086
ip = "0.0.0.0"
server.bind((ip, port))
server.listen(5)
print("Service server start success.")
def registerService(scheduler, jsonData):
webhook = jsonData['webhook']
message = jsonData['message']
if len(webhook) == 0:
return -1
if len(message) == 0:
return -1
    # Parse the scheduled time
task_time = datetime.datetime.strptime(jsonData['time'], "%Y-%m-%d %H:%M:%S")
scheduler.add_job(messagePush, 'date', run_date=task_time, args=[webhook, message])
print(message)
return 0
def messagePush(webhook, message):
header = {'Content-Type': 'application/json'}
    requests.post(webhook, data=json.dumps(message), headers=header)
def schedulerService():
scheduler = BackgroundScheduler()
scheduler.start()
    # Create the socket that listens for registration requests
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serviceServer(server)
    # Keep listening for new tasks to register
while True:
result = -1
        # Read the request body
client, addr = server.accept()
body = client.recv(1024).decode('utf-8').split('\r\n\r\n', 1)[1]
print(body)
try:
jsonData = json.loads(body)
print(jsonData)
            result = registerService(scheduler, jsonData)
except:
result = -1
        # Tell the client whether registration succeeded
if result == 0:
response = json.dumps({'status': 'OK'}).encode('utf-8')
client.send(response)
else:
response = json.dumps({'status': 'FAILED'}).encode('utf-8')
client.send(response)
client.close()
time.sleep(1)
if __name__ == '__main__':
schedulerService()
``` |
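The server above parses a raw HTTP-style request and expects a JSON body with `webhook`, `message`, and `time` fields. A sketch of a client registering a scheduled push; the host, port, and payload values are illustrative assumptions:
```python
# Illustrative client for the scheduler service above; adjust host, port and payload.
import json
import socket

payload = {
    "webhook": "https://example.com/webhook",              # where the message will be POSTed
    "message": {"msgtype": "text", "text": {"content": "hello"}},
    "time": "2023-01-01 09:00:00",                         # parsed with "%Y-%m-%d %H:%M:%S"
}
body = json.dumps(payload)

# The server splits the request on the first blank line, so send a minimal HTTP-style request.
request = "POST / HTTP/1.1\r\nContent-Type: application/json\r\n\r\n" + body

with socket.create_connection(("127.0.0.1", 10086)) as client:
    client.sendall(request.encode("utf-8"))
    print(client.recv(1024).decode("utf-8"))  # expected: {"status": "OK"}
```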
{
"source": "jora450000/gomoku.py",
"score": 3
} |
#### File: jora450000/gomoku.py/gomoku1.py
```python
import random
import pygame
import sys
from pygame.locals import *
MAX_X = 600
MAX_Y = 600
HORIZ = 20
VERT = 20
STEP_X = int(MAX_X / HORIZ)
STEP_Y = int(MAX_Y / VERT)
def draw_go(x, y, type_draw):
if type_draw == 0:
pygame.draw.rect(DISPLAYSURF, BLACK, (1+x*STEP_X,1+y*STEP_Y,STEP_X-1, -1+STEP_Y))
elif type_draw == 1:
pygame.draw.line(DISPLAYSURF, GREEN, (1+x*STEP_X,1+y*STEP_Y), (-1+(x+1)*STEP_X, -1+(y+1)*STEP_Y),1)
pygame.draw.line(DISPLAYSURF, GREEN, (-1+(x+1)*STEP_X,1+y*STEP_Y), (x*STEP_X+1, -1+(y+1)*STEP_Y),1)
elif type_draw == 2:
pygame.draw.ellipse(DISPLAYSURF, RED, (x*STEP_X +2 ,y*STEP_Y +2 , STEP_X - 4, STEP_Y - 4), 1)
a ={}
cX={}
cY={}
costW={}
costL={}
moves = []
rand = random.Random()
max_X = 20
max_Y = 20
def init(a):
for i in range(max_X):
for j in range(max_Y):
a[i,j] = 0;
def print_p(a):
for j in range(max_Y):
str = "";
for i in range(max_X):
if a[i,j] == 0 :
str+= "."
elif a[i,j] == 1:
str+="X"
elif a[i,j] == 2:
str+="O"
else:
str+="?"
print (str)
def map(x,y):
if ((x<0) or (x>=max_X) or (y<0) or (y>=max_Y)):
return -9999;
return a[x,y]
def sequence(a,x,y,dir_x, dir_y, Len): #return sequence with length = Len
Seq=[]
for i in range (Len):
if map(x+i*dir_x,y+i*dir_y)>=0:
Seq.append(a[x+i*dir_x,y+i*dir_y])
if (len(Seq) == Len):
return Seq
return []
def in_sequence(x,y,x1,y1,dir_x, dir_y, Len): #return True if x,y in sequence with length = len
in_seq=False
for i in range (Len):
if (x==x1+i*dir_x) and (y==y1+i*dir_y):
in_seq=True
return in_seq
def check_win(a):
for i in range(max_X):
for j in range(max_Y):
if (i< (max_X - 4)) and (a[i,j] != 0) and (a[i,j]==a[i+1,j]) and (a[i,j]== a [i+2,j]) and (a[i,j] == a [i+3,j]) and (a[i,j] == a [i+4,j]):
## print ("(Y=const)i,j=", i+1,j+1)
return a[i,j]
elif (j < (max_Y - 4)) and (a[i,j] != 0) and (a[i,j] == a[i,j+1] == a[i,j+2] == a[i,j+3] == a [i,j+4]):
# print ("(X=const)i,j=", i+1,j+1)
return a[i,j]
elif ((i < (max_X - 4)) and (j < max_Y - 4)) and ((a[i,j] !=0) and (a[i,j] == a[i+1,j+1] == a[i+2,j+2] == a[i+3,j+3] == a[i+4,j+4])):
# print ("(diagon1)i,j=", i+1,j+1)
return a[i,j]
elif ((i < (max_X - 4)) and (j < (max_Y - 4) )) and (a[i+4,j] != 0) and (a[i+4,j] == a[i+3,j+1]== a[i+2, j+2] == a[i+1,j+3] == a[i,j+4]):
# print ("(diagon2)i-4,j=", i-3,j+1)
return a[i+4,j]
return 0
def cost_go(x1,y1,a, playG):
antiplayG = anti_g(playG)
cost =0
if (map(x1,y1) ==0):
a[x1,y1]=playG
else:
return (-9999)
for x in range (x1-6,x1+6):
for y in range (y1 -6, y1+6):
for i in range(-1,2):
for j in range(-1,2):
if (i!=0) or (j!=0):
#5 sec probe
if in_sequence(x1,y1,x,y,i,j,5):
sec = sequence(a,x,y,i,j,5)
if sec ==[playG,antiplayG,antiplayG,antiplayG,antiplayG]:
cost = max(460,cost)
elif sec ==[playG,antiplayG,antiplayG,antiplayG,0]:
cost = max(440,cost)
elif sec ==[0,antiplayG,antiplayG,antiplayG,playG]:
cost = max(440,cost)
elif sec==[0,antiplayG,antiplayG,playG,antiplayG,0]:
cost = max(cost,300)
elif sec==[playG,antiplayG,antiplayG,antiplayG,0]:
cost = max(cost,300)
elif sec==[0,antiplayG,antiplayG,antiplayG,playG]:
cost = max(cost,300)
elif sec ==[playG,playG,playG,playG,playG]:
cost = 500
elif sec==[0,playG,playG,0,playG,0]:
cost = max(cost,300)
elif sec==[0,playG,playG,playG,0]:
cost = max(cost,300)
#7 seq
if (cost < 450) and in_sequence(x1,y1,x,y,i,j,7):
sec = sequence(a,x,y,i,j,7)
if sec==[0,antiplayG,antiplayG,antiplayG,playG,antiplayG,0]:
cost=max(cost,430)
elif sec==[playG,antiplayG,antiplayG,antiplayG,playG,antiplayG,0]:
cost=max(cost,430)
elif sec==[0,antiplayG,antiplayG,antiplayG,playG,antiplayG,playG]:
cost=max(cost,430)
elif sec==[0,antiplayG,antiplayG,playG,antiplayG,antiplayG,0]:
cost=max(cost,430)
elif sec==[0,antiplayG,playG,antiplayG,antiplayG,antiplayG,0]:
cost=max(cost,430)
elif sec==[antiplayG,antiplayG,antiplayG,playG,antiplayG,antiplayG,0]:
cost=max(cost,410)
elif sec==[0,playG,playG,playG,0,playG,0]:
cost=max(cost,400)
elif sec==[0,playG,playG,0,playG,playG,0]:
cost=max(cost,400)
elif sec==[0,playG,0,playG,playG,playG,0]:
cost=max(cost,400)
#6 seq
if (cost < 455) and in_sequence(x1,y1,x,y,i,j,6):
sec = sequence(a,x,y,i,j,6)
if sec ==[playG,antiplayG,antiplayG,antiplayG,antiplayG,playG]:
cost = max(cost,450)
elif sec==[playG,antiplayG,antiplayG,antiplayG,antiplayG,0]:
cost=max(cost,400)
elif sec==[0,antiplayG,antiplayG,antiplayG,antiplayG,playG]:
cost=max(cost,400)
elif sec==[playG,antiplayG,antiplayG,antiplayG,antiplayG,playG]:
cost=max(cost,430)
elif sec==[0,antiplayG,playG,antiplayG,antiplayG,0]:
cost = max(cost,400)
elif sec==[0,antiplayG,antiplayG,playG,antiplayG,0]:
cost = max(cost,400)
elif sec==[0,playG,playG,playG,playG,0]:
cost=max(cost,455)
elif sec==[0,playG,0,playG,playG,0]:
cost = max(cost,200)
elif sec==[0,playG,playG,0,playG,0]:
cost = max(cost,200)
#4 seq
if (cost < 100) and in_sequence(x1,y1,x,y,i,j,4):
sec = sequence(a,x,y,i,j,4)
if sec==[playG,antiplayG,antiplayG,antiplayG]:
cost = max(cost,100)
elif sec==[playG,antiplayG,antiplayG,antiplayG]:
cost = max(cost,100)
elif x>0 and y > 0 and sec==[antiplayG,playG,antiplayG,antiplayG]:
cost = max(cost,100)
elif x> 0 and y> 0 and sec==[antiplayG,antiplayG,playG,antiplayG]:
cost = max(cost,100)
elif sec==[playG,antiplayG,antiplayG,0]:
cost = max(cost,30)
elif sec==[0,antiplayG,playG,antiplayG]:
cost = max(cost,10)
elif x>0 and sec==[antiplayG,antiplayG,playG,0]:
cost = max(cost,10)
elif sec==[antiplayG,playG,playG,playG]:
cost = max(cost,10)
#3 seq
if (cost < 10) and in_sequence(x1,y1,x,y,i,j,3):
sec = sequence(a,x,y,i,j,3)
if sec==[playG,antiplayG,0]:
cost = max(cost,5)
elif sec==[0,playG,antiplayG]:
cost = max(cost,3)
elif sec==[playG,antiplayG,0]:
cost = max(cost,3)
elif sec==[antiplayG,playG,0]:
cost = max(cost,2)
elif sec==[antiplayG,playG,antiplayG]:
cost = max(cost,3)
if sec==[0,playG,0]:
cost = max(cost,5)
elif sec==[0,0,playG]:
cost = max(cost,3)
elif sec==[0,playG,0]:
cost = max(cost,3)
elif sec==[playG,0,0]:
cost = max(cost,2)
elif sec==[playG,0,playG]:
cost = max(cost,3)
a[x1,y1] = 0
return cost
def rand_go(a,g):
X = rand.randint(0,max_X-1)
Y = rand.randint(0,max_Y-1)
if a[X,Y] == 0:
a[X,Y] =g
return True;
else:
return False #unable make go
def max_i(ax):
max_k = 0
for k in range (1, len(ax) ):
if ax[k] > ax[max_k]:
max_k = k
return max_k
def min_i(ax):
min_k = 0
for k in range (1, len(ax) ):
if ax[k] < ax[min_k]:
min_k = k
return min_k
def anti_g(g):
if g == 2:
return 1
return 2
def beginner_go(a,g):
cX = {}
cY = {}
costW = {}
costL = {}
if g == 2:
anti_g=1
else:
anti_g=2;
for k in range(len(moves)*24+50) :
(cX[k], cY[k] ) = moves[k % len(moves)]
iterator = 0
while (map(cX[k],cY[k]) != 0 and (iterator < 50)):
cX[k] += rand.randint(-2,3)
cY[k] += rand.randint(-2,3)
iterator += 1
costW[k] = cost_go(cX[k] ,cY[k], a, g)
best_strike = max_i(costW)
a[cX[best_strike], cY[best_strike]] = g;
draw_go(cX[best_strike], cY[best_strike], g);
moves.append((cX[best_strike], cY[best_strike]));
# cX=cY=costW=costL=[]
print (f"({cX[best_strike]}, {cY[best_strike]}, {costW[best_strike]})")
return (cX[best_strike], cY[best_strike], costW[best_strike])
def impedance_go(a,g):
cX = {}
cY = {}
costNext = {}
if g == 2:
anti_g=1
else:
anti_g=2
for k in range(len(moves)*24+50):
(cX[k], cY[k],costNext[k] ) = moves[k % len(moves)]
iterator =0
while (map(cX[k],cY[k]) != 0 and (iterator < 50)):
cX[k] += rand.randint(-2,3)
cY[k] += rand.randint(-2,3)
iterator += 1
beginner_go(a,anti_g)
beginner_go(a,g)
costW[k] = cost_go(cX[k] ,cY[k], a, g)
best_strike = max_i(costW)
a[cX[best_strike], cY[best_strike]] = g;
draw_go(cX[best_strike], cY[best_strike], g);
moves.append((cX[best_strike], cY[best_strike]));
return (cX[best_strike], cY[best_strike], costW[best_strike])
pygame.init()
# set up the window
DISPLAYSURF = pygame.display.set_mode((MAX_X, MAX_Y), 0, 32)
pygame.display.set_caption('Drawing')
# set up the colors
BLACK = (0,0,0)
WHITE = (255, 255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
# draw on the surface object
DISPLAYSURF.fill(BLACK)
for i in range (STEP_X, MAX_X , STEP_X):
pygame.draw.line(DISPLAYSURF, BLUE, (i, 0), (i, MAX_Y), 1)
for j in range (STEP_Y, MAX_Y , STEP_Y):
pygame.draw.line(DISPLAYSURF, BLUE, (0, j), (MAX_X, j), 1)
init(a)
# run the game loop
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
x = int (mousex / STEP_X)
y = int (mousey /STEP_Y)
if a[x,y] == 0 and check_win(a) == 0:
draw_go (x,y, 1)
pygame.display.update()
a[x,y] = 1
moves.append([x,y])
status_game = check_win(a)
if (status_game == 1):
print ("win X")
else:
beginner_go(a,2)
status_game = check_win(a)
if status_game == 2:
print("win O");
pygame.display.update()
#for i in range(1500):
# while(not rand_go(a,1)):
# mmm=2
# beginner_go(a,1);
# if (check_win(a) == 1):
# print ("win X")
# break
# while not rand_go(a,2):
# mm = 3
# beginner_go(a,2);
# if (check_win(a) == 2 ):
# print ("win O")
# break
# print_p(a)
# print ("==================move ", i)
#print("************last position******************************", i)
#print_p(a)
``` |
{
"source": "jorabold/similarity-transform",
"score": 4
} |
#### File: jorabold/similarity-transform/similarity_transform.py
```python
import numpy as np
from typing import Tuple
def similarity_transform(
X: np.ndarray, Y: np.ndarray, dim: int = 3
) -> Tuple[np.ndarray, float, np.ndarray]:
"""Calculate the similarity transform between two (matching) point sets.
Parameters
----------
X: np.ndarray
Points of first trajectory (dim x n matrix, where dim = 3 for 3D)
Y: np.ndarray
Points of second trajectory (dim x n matrix, where dim = 3 for 3D)
dim: int
Dimensionality of points
Returns
-------
R: np.ndarray
Rotation matrix from X to Y
c: np.ndarray
        Scale factor from X to Y
t: np.ndarray
Translation from X to Y
Reference
---------
    Umeyama, S., "Least-squares estimation of transformation parameters
between two point patterns," in IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 13, no. 4, pp. 376-380, April 1991,
doi: 10.1109/34.88573.
"""
if X.shape[0] != dim:
raise ValueError(
f"You've set {dim=}, so X should have shape ({dim}xn) "
+ f"but is {X.shape}!"
)
if Y.shape[0] != dim:
raise ValueError(
f"You've set {dim=}, so Y should have shape ({dim}xn) but "
+ f"is {Y.shape}!"
)
if X.shape != Y.shape:
raise ValueError(
f"X and Y must have same shape! But {X.shape} != {Y.shape}"
)
m, n = X.shape
mu_x = np.mean(X, axis=1)
mu_y = np.mean(Y, axis=1)
X_centered = (X.T - mu_x).T
Y_centered = (Y.T - mu_y).T
s_xx = np.mean(np.sum(X_centered**2, 0))
Sigma_xy = 1 / n * X_centered @ Y_centered.T
U, D, V = np.linalg.svd(Sigma_xy)
    V = V.T  # numpy returns V transposed relative to Umeyama's notation
D = np.diag(D)
S = np.eye(m)
if np.linalg.matrix_rank(Sigma_xy) > (m - 1):
if np.linalg.det(Sigma_xy) < 0:
S[m - 1, m - 1] = -1
elif np.linalg.matrix_rank(Sigma_xy) == m - 1:
if np.linalg.det(U) * np.linalg.det(V) < 0:
S[m - 1, m - 1] = -1
else:
print("Rank too small! Cannot estimate transformation.")
R = np.eye(m)
c = 1
t = np.zeros(m)
return R, c, t
R = (U @ S @ V.T).T
c = np.trace(D @ S) / s_xx
t = mu_y - c * R @ mu_x
return R, c, t
def construct_transformation_matrix(
R: np.ndarray, c: np.ndarray, t: np.ndarray
) -> np.ndarray:
"""Get transformation matrix from rotation R, scale c and translation."""
n = R.shape[0]
T = np.identity(n + 1)
T[:n, :n] = c * R
T[:n, n] = t
return T
def apply_transformation(points: np.ndarray, T: np.ndarray) -> np.ndarray:
"""Transform points with transformation matrix T.
Parameters
----------
points: np.ndarray
3 x n matrix containing the points to transform
T: np.ndarray
4 x 4 transformation matrix in homogeneous coordinates
Returns
-------
np.ndarray
3 x n matrix containing the transformed points
"""
m = points.shape[0]
if m != 3:
raise ValueError(f"points should be (3xn) but is {points.shape}!")
src = np.ones((m + 1, points.shape[1]))
src[:m, :] = np.copy(points)
src = np.dot(T, src)
return src[:m, :] / src[m:, :]
``` |
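A quick self-check of the estimator above on synthetic points, assuming the three functions are in scope (e.g. imported from this module); the rotation angle, scale, and translation are arbitrary illustration values:
```python
# Illustrative round-trip: build a known similarity transform, apply it to
# random points, then recover it with similarity_transform().
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(3, 100))                        # 3 x n source points

theta = 0.3                                          # arbitrary rotation about the z-axis
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
c_true, t_true = 2.0, np.array([1.0, -2.0, 0.5])

Y = c_true * R_true @ X + t_true[:, None]

R, c, t = similarity_transform(X, Y)
T = construct_transformation_matrix(R, c, t)
print(np.allclose(apply_transformation(X, T), Y))    # expected: True
```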
{
"source": "joragupra/CD4ML-Scenarios",
"score": 2
} |
#### File: CD4ML-Scenarios/cd4ml/app.py
```python
from flask import Flask, render_template, request
import cd4ml.app_utils as utils
from cd4ml.fluentd_logging import FluentdLogger
app = Flask(__name__, template_folder='webapp/templates',
static_folder='webapp/static')
fluentd_logger = FluentdLogger()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/replace_model', methods=["POST"])
def replace_model():
content = request.get_data(as_text=False)
utils.replace_model_file(content)
return "OK", 200
@app.route('/replace_encoder', methods=["POST"])
def replace_encoder():
content = request.get_data(as_text=False)
utils.replace_encoder_file(content)
return "OK", 200
@app.route('/prediction')
def get_prediction():
date_string = request.args.get('date')
item_nbr = request.args.get("item_nbr")
prediction_tuple = utils.get_prediction(item_nbr, date_string)
status = prediction_tuple[0]
prediction = prediction_tuple[1]
log_payload = {
'prediction': prediction,
'itemid': item_nbr,
'item_name': utils.get_product_name_from_id(item_nbr),
'date_string': date_string
}
log_prediction_console(log_payload)
fluentd_logger.log('prediction', log_payload)
if status == "ERROR":
return prediction, 503
else:
return "%d" % prediction, 200
def log_prediction_console(log_payload):
print('logging {}'.format(log_payload))
```
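A sketch of exercising the prediction endpoint above once the Flask app is running; the host, port, date, and item number are assumptions, not values taken from the project:
```python
# Illustrative request against the Flask app above.
import requests

resp = requests.get(
    "http://localhost:5000/prediction",
    params={"date": "2017-08-15", "item_nbr": "99197"},
)
print(resp.status_code, resp.text)  # 200 with an integer prediction, or 503 on error
```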
#### File: CD4ML-Scenarios/cd4ml/date_utils.py
```python
import datetime
import arrow
from cd4ml.memo import memo
@memo
def parse_date_to_arrow(date_string):
first_part = date_string.split()[0]
first_part = first_part.split(":")[0]
try:
format_string = 'M/D/YY'
arrow_date = arrow.get(first_part, format_string)
except arrow.parser.ParserError:
try:
format_string = 'DD-MMM-YY'
arrow_date = arrow.get(first_part, format_string)
except arrow.parser.ParserError:
try:
format_string = 'YYYY-MM-DD'
arrow_date = arrow.get(first_part, format_string)
except arrow.parser.ParserError:
format_string = 'DDMMMYYYY'
arrow_date = arrow.get(first_part, format_string)
return arrow_date
@memo
def parse_date_as_datetime_date(date_string):
return parse_date_to_arrow(date_string).date()
def get_days_from_start_date(current_date, start_date):
diff_sec = (current_date - start_date).total_seconds()
return diff_sec / (24 * 3600.0)
def get_day_range_dates(num_days, stride_days):
first = datetime.date(2000, 1, 1)
return [first + datetime.timedelta(days=x)
for x in range(0, num_days * stride_days, stride_days)]
@memo
def convert_date_to_ymd(date):
"""
:param date: a date string
:return: date in 'YYYY-MM-DD' format (e.g. 1978-03-30)
"""
date_format = 'YYYY-MM-DD'
return parse_date_to_arrow(date).format(date_format)
def add_to_date_string(date_string, years=0, months=0, days=0):
arrow_obj = parse_date_to_arrow(date_string)
fmt = 'YYYY-MM-DD'
return arrow_obj.shift(years=years, months=months, days=days).format(fmt)
def diff_days_date_strings(date_string_start, date_string_end):
arrow_obj_start = parse_date_to_arrow(date_string_start)
arrow_obj_end = parse_date_to_arrow(date_string_end)
return (arrow_obj_end - arrow_obj_start).days
@memo
def date_string_to_date(date):
year, month, day = date_to_ymd(date)
return datetime.date(year, month, day)
@memo
def ymd_to_date_string(ymd):
year, month, day = ymd
return "%s-%s-%s" % (str(year), str(month).zfill(2), str(day).zfill(2))
@memo
def date_to_ymd(date_string):
ymd = date_string.split('-')
ymd = [int(i) for i in ymd]
year, month, day = ymd
return year, month, day
@memo
def ymd_to_weekday(ymd):
    date = datetime.datetime(*ymd)
return date.weekday()
@memo
def date_string_to_weekday(date_string):
ymd = date_to_ymd(date_string)
date = datetime.datetime(*ymd)
return date.weekday()
```
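A small illustration of the shift/diff helpers above, assuming they are in scope (e.g. imported from cd4ml.date_utils); the dates are arbitrary:
```python
# Arbitrary example dates, chosen only to illustrate the helpers.
print(add_to_date_string('2017-08-01', days=30))           # '2017-08-31'
print(diff_days_date_strings('2017-08-01', '2017-08-31'))  # 30
print(date_string_to_weekday('2017-08-01'))                # 1 (Tuesday)
```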
#### File: CD4ML-Scenarios/cd4ml/evaluation.py
```python
import numpy as np
import pandas as pd
def nwrmsle(predictions, targets, weights):
if type(predictions) == list:
predictions = np.array([np.nan if x < 0 else x for x in predictions])
elif type(predictions) == pd.Series:
predictions[predictions < 0] = np.nan
targetsf = targets.astype(float)
targetsf[targets < 0] = np.nan
weights = 1 + 0.25 * weights
log_square_errors = (np.log(predictions + 1) - np.log(targetsf + 1)) ** 2
return(np.sqrt(np.sum(weights * log_square_errors) / np.sum(weights)))
```
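The metric above is a normalized weighted root mean squared logarithmic error. A tiny worked example with invented values, assuming `nwrmsle` is in scope:
```python
# Invented arrays, purely to illustrate the metric.
import numpy as np

predictions = np.array([2.0, 0.0, 5.0])
targets = np.array([1.0, 0.0, 5.0])
weights = np.array([0.0, 1.0, 0.0])   # rescaled to 1 + 0.25 * w inside the metric

# Only the first element contributes: (log(3) - log(2))**2 with weight 1.0,
# normalized by the weight sum 3.25, so the result is about 0.225.
print(nwrmsle(predictions, targets, weights))
```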
#### File: CD4ML-Scenarios/cd4ml/run_ml.py
```python
from cd4ml import tracking
from cd4ml.read_data import stream_data, get_encoder
from cd4ml.splitter import get_cutoff_dates, train_filter
from cd4ml.train import train_model
from cd4ml.validate import validate
def run_ml_model(pipeline_params, encoder, track, date_cutoff, seed=None):
target_name = 'unit_sales'
train_stream = (row for row in stream_data(pipeline_params) if train_filter(row, date_cutoff))
encoded_train_stream = encoder.encode_data_stream(train_stream)
print('Encoding data')
# batch step, read it all in
encoded_train_data = list(encoded_train_stream)
print('Getting target')
# read it all in
target = [row[target_name] for row in stream_data(pipeline_params) if train_filter(row, date_cutoff)]
model_name = pipeline_params['model_name']
params = pipeline_params['model_params'][model_name]
track.log_ml_params(params)
track.log_pipeline_params(pipeline_params)
trained_model, params = train_model(encoded_train_data, target, model_name, params, seed=seed)
return trained_model, params
def run_all(pipeline_params):
# pass in pipeline_params so you maintain top level
# programmatic control over all of the pipeline
encoder = get_encoder(pipeline_params, write=True, read_from_file=False)
date_cutoff, max_date = get_cutoff_dates(pipeline_params)
# For testing/debugging
run_all_models = False
with tracking.track() as track:
if run_all_models:
# This is mostly for testing/debugging right now, not fully supported
            # models will overwrite each other and only the last one will show up in
            # MLflow. Turning this on can demonstrate that all models can run.
            # They might not all pass the acceptance threshold though.
all_model_names = sorted(list(pipeline_params['model_params'].keys()))
print('All model names')
print(all_model_names)
else:
all_model_names = [pipeline_params['model_name']]
for model_name in all_model_names:
pipeline_params['model_name'] = model_name
trained_model, params = run_ml_model(pipeline_params, encoder, track, date_cutoff)
validate(pipeline_params, trained_model, encoder, track, date_cutoff, max_date)
```
#### File: CD4ML-Scenarios/cd4ml/validation_plots.py
```python
from bokeh.plotting import figure, save
from cd4ml.filenames import file_names
def get_validation_plot(true_value, prediction):
x_min = min(min(true_value), min(prediction))
x_max = max(max(true_value), max(prediction))
x_range = [x_min, x_max]
y_range = x_range
plot = figure(width=800, height=800,
x_range=x_range, y_range=y_range)
plot.xaxis.axis_label = "True value"
plot.xaxis.axis_label_text_font_size = '14pt'
plot.xaxis.major_label_text_font_size = '12pt'
plot.yaxis.axis_label = "Prediction"
plot.yaxis.axis_label_text_font_size = '14pt'
plot.yaxis.major_label_text_font_size = '12pt'
plot.circle(true_value, prediction)
plot.line(x_range, y_range, line_dash='dashed', color='gray')
return plot
def make_validation_plot(true_value, prediction, track):
plot = get_validation_plot(true_value, prediction)
filename = file_names['validation_plot']
print('Writing validation plot: %s' % filename)
save(plot, filename=filename, title='validation plot')
track.log_artifact(filename)
```
#### File: CD4ML-Scenarios/scripts/acceptance.py
```python
from cd4ml.accept_model import check_model_performance
from cd4ml.pipeline_params import pipeline_params
def main(*args):
"""
Check model meets acceptance threshold
"""
metric = pipeline_params['acceptance_metric']
threshold_min = pipeline_params['acceptance_threshold_min']
threshold_max = pipeline_params['acceptance_threshold_max']
check_model_performance(metric, threshold_min, threshold_max)
```
#### File: CD4ML-Scenarios/scripts/pipeline.py
```python
import numpy as np
from cd4ml import pipeline_helpers as ph
from cd4ml.pipeline_params import pipeline_params
def main(*args):
"""
Run the pipeline
"""
args = args[0]
if len(args) > 0:
variable = args[0]
else:
variable = None
np.random.seed(462748)
if variable:
print('variable: %s' % variable)
if pipeline_params["data_source"] == "file":
ph.download_data(pipeline_params)
ph.train_and_validate_model(pipeline_params)
``` |
{
"source": "jorahn/icy",
"score": 3
} |
#### File: icy/ml/explore.py
```python
import numpy as np
def show_dtypes(data):
for key in data:
print(key)
        print(data[key].dtypes.value_counts())
print()
def show_cat_uniques(data):
for key in data:
print(key, data[key].shape)
for col in data[key].select_dtypes(exclude=[np.number]):
print(col, len(data[key][col].unique()))
print()
def show_nan(data):
for key in data:
print(key, data[key].shape)
for col in data[key]:
nans = sum(data[key][col].isnull())
if nans > 0:
print(col, nans)
print()
def show_cov(data, labels, top=3):
top_cov = []
for col in data.select_dtypes(include=[np.number]):
cov = labels.cov(data[col])
if len(top_cov) < top or cov > min([e[1] for e in top_cov]):
if not np.isnan(cov):
top_cov.append((col, cov))
top_cov = sorted(top_cov, key=lambda x: x[1], reverse=True)[:top]
if len(top_cov) > 0:
for e in top_cov:
print(e[0], e[1])
print()
def show_corr(data, labels, top=3, high=True):
top_corr = []
for col in data.select_dtypes(include=[np.number]):
corr = labels.corr(data[col])
if high:
if len(top_corr) < top or corr > min([e[1] for e in top_corr]):
if not np.isnan(corr):
top_corr.append((col, corr))
top_corr = sorted(top_corr, key=lambda x: x[1], reverse=True)[:top]
else:
if len(top_corr) < top or corr < max([e[1] for e in top_corr]):
if not np.isnan(corr):
top_corr.append((col, corr))
top_corr = sorted(top_corr, key=lambda x: x[1], reverse=False)[:top]
if len(top_corr) > 0:
for e in top_corr:
print(e[0], e[1])
print()
```
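A toy illustration of `show_corr` with an invented DataFrame, assuming the function is in scope:
```python
# Invented toy data: column 'a' correlates perfectly with the labels, 'b' inversely.
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1], 'c': list('xyxy')})
labels = pd.Series([2.0, 4.0, 6.0, 8.0])

show_corr(df, labels, top=2)  # prints 'a 1.0' then 'b -1.0'
```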
#### File: icy/icy/utils.py
```python
import os
def str_remove_accents(s):
"""Utility to remove accents from characters in string"""
import unicodedata
return unicodedata.normalize('NFD', s).encode('ascii','ignore').decode('ascii')
def pdf_extract_text(path, pdfbox_path, pwd='', timeout=120):
"""Utility to use PDFBox from pdfbox.apache.org to extract Text from a PDF
Parameters
----------
path : str
Path to source pdf-file
pdfbox_path : str
Path to pdfbox-app-x.y.z.jar
pwd : str, optional
Password for protected pdf files
timeout : int, optional
Seconds to wait for a result before raising an exception (defaults to 120).
Returns
-------
file
Writes the result as the name of the source file and appends '.txt'.
Notes
-----
- Requires pdfbox-app-x.y.z.jar in a recent version (see http://pdfbox.apache.org).
- Requires Java (JDK) 1.5 or newer (see http://www.oracle.com/technetwork/java/javase/downloads/index.html).
- Requires java to be on the PATH.
"""
if not os.path.isfile(path):
raise IOError('path must be the location of the source pdf-file')
if not os.path.isfile(pdfbox_path):
raise IOError('pdfbox_path must be the location of the pdfbox.jar')
import subprocess
for p in os.environ['PATH'].split(':'):
if os.path.isfile(os.path.join(p, 'java')):
break
else:
print('java is not on the PATH')
return
try:
if pwd == '':
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', path, path+'.txt']
else:
cmd = ['java', '-jar', pdfbox_path, 'ExtractText', '-password', pwd,
path, path+'.txt']
subprocess.check_call(cmd, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=timeout)
except subprocess.TimeoutExpired as e:
print('Timeout of {:.1f} min expired'.format(timeout/60))
except subprocess.CalledProcessError as e:
print('Text could not successfully be extracted.')
def xml_to_json(s):
from icy.ext.xml2json import xml2json
from collections import namedtuple
Options = namedtuple('options', ['pretty'])
xml2json_opts = Options(True)
return xml2json(s, xml2json_opts)
``` |
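A usage sketch for `pdf_extract_text` above; the file and jar paths are hypothetical and will differ on your machine:
```python
# Hypothetical paths, for illustration only.
from icy.utils import pdf_extract_text

pdf_extract_text('reports/annual.pdf', '/opt/pdfbox/pdfbox-app-2.0.27.jar')
# On success this writes 'reports/annual.pdf.txt' next to the source file.
```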
{
"source": "JoramD0/dcs-livery-unlocker",
"score": 3
} |
#### File: dcs-livery-unlocker/src/main.py
```python
import os
import PySimpleGUI as sg
from ui import UI
from utils import Utils, Notifier, check_saved_games
def main():
DEBUG = False
ui = UI()
utils = Utils()
notifications = Notifier(ui.window)
while True:
menu_event, menu_values = ui.window.read(timeout=100)
if menu_event != "__TIMEOUT__":
notifications.clear()
if DEBUG:
            print(f"LOG - MENU_EVENT: {menu_event}")
print(f"LOG - MENU_VALUES: {menu_values}")
if menu_event == sg.WIN_CLOSED:
break
if menu_event == '-DCSDIR-':
dcs_dir = menu_values['-DCSDIR-']
if os.path.exists(dcs_dir) and 'DCS' in dcs_dir:
utils.dcs_dir = dcs_dir
else:
notifications.add('DCS directory not found.')
if menu_event == '-SAVEDGAMESDIR-':
saved_games_dcs_dir = menu_values['-SAVEDGAMESDIR-']
if os.path.exists(saved_games_dcs_dir) and (
saved_games_dcs_dir.endswith('DCS') or saved_games_dcs_dir.endswith('DCS.openbeta')
):
utils.saved_games_dcs_dir = saved_games_dcs_dir
else:
notifications.add('SavedGames/DCS directory not found.')
if menu_event == '-START-':
if utils.ready():
notifications.clear()
ui.window['-START-'].update(disabled=True)
print(utils.saved_games_dcs_dir, utils.dcs_dir)
try:
utils.fix_default_liveries()
# utils.fix_mods_liveries() # this one is commented because each mod does it their own way
# utils.fix_downloaded_liveries() # Not useful to me
utils.fix_bazar_liveries()
notifications.add('Done! You may close the program.')
ui.window['-START-'].update(disabled=False)
except Exception as e:
notifications.add('Something went wrong. Check log file.')
with open('dcs_nation_skin_unlocker.log', 'a+', encoding='utf-8') as f:
f.write(str(e))
else:
notifications.add('Paths not set.')
ui.window.close()
if __name__ == '__main__':
main()
```
#### File: dcs-livery-unlocker/src/utils.py
```python
import os, shutil
from glob import glob
def check_saved_games(saved_games_dcs_dir):
for folder_name in os.listdir(saved_games_dcs_dir):
if folder_name in ('DCS', 'DCS.openbeta'):
return True
return False
class Utils:
def __init__(self):
self.dcs_dir = ''
self.saved_games_dcs_dir = ''
# self.window = ui_window
@staticmethod
def comment_out_countries_restriction(file_path):
'''
Comment out every `countries = {...}` block in the given livery description.lua,
removing the country restriction from the livery.
'''
print(f'Unlocking: {file_path}')
lines = []
with open(file_path, 'r', encoding='UTF-8') as file:
lines = file.readlines()
open_flag = False
for i, line in enumerate(lines):
if 'countries = {' in line and '}' in line: # Single-line comment
lines[i] = f'-- {line}'
if 'countries = {' in line and '}' not in line: # Opening Line of block
open_flag = True
print(f'Unlocking: {file_path}')
lines[i] = f'-- {line}'
if open_flag and '}' not in line: # Center of block
lines[i] = f'-- {line}'
if open_flag and '}' in line: # End of block
open_flag = False
lines[i] = f'-- {line}'
with open(file_path, 'w', encoding='UTF-8') as f:
f.writelines(lines)
def ready(self):
return self.dcs_dir != '' and self.saved_games_dcs_dir != ''
def fix_default_liveries(self):
CORE_MODS_GLOB_STR = os.path.join(self.dcs_dir, 'CoreMods/aircraft/**/Liveries/**/**/*.lua')
for file_path in glob(CORE_MODS_GLOB_STR):
if r'Pack\Liveries' not in file_path:
with open(file_path, 'r', encoding='UTF-8') as f:
if 'countries = {' in f.read():
# Move file to SavedGames
main_path = os.path.abspath(file_path)
vehicle_name = os.path.basename(os.path.normpath(os.path.join(main_path, os.pardir, os.pardir)))
texture_name = os.path.basename(os.path.normpath(os.path.join(main_path, os.pardir)))
target_path = os.path.join(self.saved_games_dcs_dir, 'Liveries/', vehicle_name, texture_name, 'description.lua')
try:
os.makedirs(os.path.dirname(target_path), exist_ok=True)
shutil.copy(main_path, target_path)
except shutil.Error as err:
print(err.args[0])
self.comment_out_countries_restriction(target_path)
def fix_downloaded_liveries(self):
SAVED_GAMES_LIVERIES = os.path.join(self.saved_games_dcs_dir, '?iveries/**/*.lua')
for file_path in glob(SAVED_GAMES_LIVERIES):
self.comment_out_countries_restriction(file_path)
# Not used
def fix_mods_liveries(self):
SAVED_GAMES_MODS_LIVERIES = os.path.join(self.saved_games_dcs_dir, '?ods/aircraft/**/Liveries/**/*.lua')
for file_path in glob(SAVED_GAMES_MODS_LIVERIES):
if r'Pack\Liveries' not in file_path:
self.comment_out_countries_restriction(file_path)
def fix_bazar_liveries(self):
CORE_MODS_GLOB_STR = os.path.join(self.dcs_dir, 'Bazar/Liveries/**/**/*.lua')
for file_path in glob(CORE_MODS_GLOB_STR):
with open(file_path, 'r', encoding='UTF-8') as f:
if 'countries = {' in f.read():
# Move file to SavedGames
main_path = os.path.abspath(file_path)
vehicle_name = os.path.basename(os.path.normpath(os.path.join(main_path, os.pardir, os.pardir)))
texture_name = os.path.basename(os.path.normpath(os.path.join(main_path, os.pardir)))
target_path = os.path.join(self.saved_games_dcs_dir, 'Liveries/', vehicle_name, texture_name, 'description.lua')
try:
os.makedirs(os.path.dirname(target_path), exist_ok=True)
shutil.copy(main_path, target_path)
except shutil.Error as err:
print(err.args[0])
self.comment_out_countries_restriction(target_path)
class Notifier:
'''
Notifications wrapper
'''
def __init__(self, window):
self.window = window
self.buffer = ''
def notify(self, msg):
'''
Writes notification to the notifications area.
'''
self.window['-NOTIFICATIONS-'].update(visible=True)
self.window['-NOTIFICATIONS-'].update(value=msg)
self.buffer = msg
def add(self, msg):
'''
Adds a notification to the notifications area.
'''
self.window['-NOTIFICATIONS-'].update(visible=True)
self.window['-NOTIFICATIONS-'].update(value=self.buffer + msg)
self.buffer += msg
def clear(self):
'''
Clears the notifications area.
'''
self.window['-NOTIFICATIONS-'].update(value='')
self.window['-NOTIFICATIONS-'].update(visible=False)
self.buffer = ''
``` |
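A small, self-contained sketch (not part of the repository) of what `comment_out_countries_restriction` does to a livery `description.lua`; the lua content below is a made-up example containing the `countries = {...}` block the unlocker looks for.
```python
# Illustrative only: run the unlock helper on a throwaway description.lua.
import os
import tempfile

from utils import Utils  # src/utils.py above

lua = 'name = "Demo livery"\ncountries = {\n    "USA",\n    "RUS",\n}\n'
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'description.lua')
    with open(path, 'w', encoding='UTF-8') as f:
        f.write(lua)
    Utils.comment_out_countries_restriction(path)   # static method, no instance needed
    with open(path, encoding='UTF-8') as f:
        print(f.read())   # every line of the countries block is now prefixed with '-- '
```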
{
"source": "JoramKeijser/iLQR-jax",
"score": 2
} |
#### File: iLQR-jax/src/network_and_arm.py
```python
import jax
from jax import numpy as np
from jax.lax import scan
from jax import vmap, jit
import pickle
import arm_model
# Network dynamics
with open("../data/network_s972356.pickle", 'rb') as handle:
data = pickle.load(handle)
params = data['params']
C = np.asarray(params['C'])
W = np.asarray(data['W'])
hbar = np.asarray(data['hbar'])
phi = lambda x: x
def continuous_network_dynamics(x, inputs):
tau = 150
return (-x + W.dot(phi(x)) + inputs + hbar) / tau
def discrete_network_dynamics(x, inputs):
# x: (neurons + 2, ), first two dims are readouts
dt = 1.0
y, h = x[:2], x[2:]
h = h + dt*continuous_network_dynamics(h, inputs)
y = h.dot(C)
x_new = np.concatenate((y, h))
return x_new, x_new
# Combine them
def discrete_dynamics(x, inputs):
"""
x: [y, h, q] of size 2+N+4 = N+6
inputs: size (N, )
"""
N = inputs.shape[0]
network_states = discrete_network_dynamics(x[:N+2], inputs)[0]
y, h = network_states[:2], network_states[2:]
arm_states = arm_model.discrete_dynamics(x[N+2:], y)[0]
x_new = np.concatenate((network_states, arm_states))
return x_new, x_new
def rollout(x0, u_trj):
"""
x0: init states [y0, h0, q0], size (N+6, )
u_trj: network inputs, size (T, N) - one row of N inputs per time step
"""
N = u_trj.shape[1]
_, x_trj = scan(discrete_dynamics, x0, u_trj)
y, h, q = x_trj[:,:2], x_trj[:,2:N+2], x_trj[:,N+2:]
return y, h, q
rollout_jit = jit(rollout)
rollout_batch = jit(vmap(rollout), (0,0))
``` |
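A hedged rollout sketch (not from the repository): it assumes the module above imports cleanly, i.e. that the pickled network weights and the local `arm_model` module are available. The horizon `T` and the input scale are arbitrary choices.
```python
# Illustrative rollout of the combined network + arm dynamics.
import jax
from jax import numpy as np

from network_and_arm import rollout_jit, W

N = W.shape[0]                 # number of recurrent units from the loaded weights
T = 100                        # arbitrary horizon
x0 = np.zeros(N + 6)           # [y (2), h (N), q (4)], see the docstrings above
key = jax.random.PRNGKey(0)
u_trj = 0.1 * jax.random.normal(key, (T, N))   # one row of inputs per time step

y, h, q = rollout_jit(x0, u_trj)
print(y.shape, h.shape, q.shape)   # (T, 2), (T, N), (T, 4)
```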
{
"source": "joram/ml-snek",
"score": 3
} |
#### File: ml_snek/dataloaders/dataloader.py
```python
import numpy
from .base_dataloader import BaseDataloader
class Dataloader(BaseDataloader):
def __init__(self, dataset):
self._dataset = dataset
def _frame_to_image(self, frame, winner_id):
w = frame["board"]["height"]
h = frame["board"]["width"]
data = {}
for x in range(0, w):
for y in range(0, h):
data[x, y] = 0
# body
body_data = dict(data)  # copy the empty grid so each channel stays independent
for snake in frame["board"]["snakes"]:
body = snake["body"]
for i in range(len(body) - 1, 1, -1):
coord = body[i]
body_data[coord["x"], coord["y"]] = i
body_data = list(body_data.values())
# food
food_data = dict(data)
foods = frame["board"]["food"]
for coord in foods:
food_data[coord["x"], coord["y"]] = 1
food_data = list(food_data.values())
# head
my_head_data = dict(data)
their_head_data = dict(data)
for snake in frame["board"]["snakes"]:
head = snake["body"][0]
if snake["id"] == winner_id:
my_head_data[head["x"], head["y"]] = 1
else:
their_head_data[head["x"], head["y"]] = 1
my_head_data = list(my_head_data.values())
their_head_data = list(their_head_data.values())
return numpy.array([] + body_data + food_data + my_head_data + their_head_data)
def _string_dir_to_int(self, direction):
return {
"UP": 0,
"DOWN": 1,
"LEFT": 2,
"RIGHT": 3,
}[direction]
def __getitem__(self, index):
frame, winner_id, direction = self._dataset[index]
input_values = self._frame_to_image(frame, winner_id)
output_value = self._string_dir_to_int(direction)
return input_values, output_value
def __iter__(self):
for frame, winner_id, direction in self._dataset:
input_values = self._frame_to_image(frame, winner_id)
output_value = self._string_dir_to_int(direction)
yield input_values, output_value
```
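A hedged sketch (not in the repository) of feeding the `Dataloader` a single hand-built frame; the frame dictionary only contains the keys the code above actually reads, and the import path follows the file layout shown in the headers.
```python
# Illustrative: one tiny 3x3 frame with a single snake and one food item.
from ml_snek.dataloaders.dataloader import Dataloader

frame = {
    "board": {
        "height": 3,
        "width": 3,
        "snakes": [{"id": "me", "body": [{"x": 1, "y": 1}, {"x": 1, "y": 2}]}],
        "food": [{"x": 0, "y": 0}],
    }
}
loader = Dataloader([(frame, "me", "UP")])
inputs, target = loader[0]
print(inputs.shape, target)   # (36,) 0  -> four 3x3 channels flattened, "UP" == 0
```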
#### File: ml_snek/datasets/jsnek_dataset.py
```python
from .jsnek_base_dataset import JSnekBaseDataset
from .. import utils
class JSnekDataset(JSnekBaseDataset):
"""Represents a board state in the following way:
board_state: `torch.Tensor`
Board state in torch.Tensor format. Board state can either be
C x H x W
or
(C*H*W) if board_state_as_vector = True
direction: `torch.Tensor`
Direction taken in one-hot format
"""
def __init__(
self, board_state_as_vector=False, direction_as_index=False, max_frames=-1
):
super().__init__(max_frames=max_frames)
self.board_state_as_vector = board_state_as_vector
self.direction_as_index = direction_as_index
def __getitem__(self, index):
"""
Parameters
----------
index : int
Index of datum
Returns
-------
board_state: `torch.Tensor`
Board state in torch.Tensor format. Board state can either be
C x H x W
or
(C*H*W) if board_state_as_vector = True
direction: `torch.Tensor`
Direction taken in one-hot format
or
Index if direction_as_index = True
"""
frame, winner_id, direction = super().__getitem__(index)
board_state = utils.frame_to_image(frame, winner_id)
if self.board_state_as_vector:
board_state = board_state.view([board_state.numel()])
if self.direction_as_index:
direction = utils.direction_to_index(direction)
else:
direction = utils.direction_to_onehot(direction)
return board_state, direction
```
#### File: ml_snek/tests/test_dataloaders.py
```python
from torch.utils.data import DataLoader
def test_flat_dataloader(dataset_jsnek):
batch_size = 32
dataloader = DataLoader(dataset_jsnek, batch_size=batch_size)
first_item = dataloader.__iter__().__next__()
assert type(first_item) == list
assert len(first_item) == 2
for i in range(len(first_item)):
assert len(first_item[i]) == batch_size
```
#### File: ml_snek/utils/utils.py
```python
import os
import importlib
from PIL import Image
import torch
from torchvision import transforms
def _image_to_input(pixels, w, h):
values = []
for x in range(0, w):
for y in range(0, h):
val = pixels[x, y]
values.append(val[0])
values.append(val[1])
values.append(val[2])
return values
def gen_training_data(limit=100):
i = 0
values = []
trans1 = transforms.ToTensor()
for filename in os.listdir("../images/"):
filepath = os.path.join("../images/", filename)
img = Image.open(filepath)
# import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
img_data = trans1(img)
directions = ["UP", "DOWN", "LEFT", "RIGHT", "WFT!"]
try:
direction = filename.split("::")[1].replace(".png", "")
except IndexError:
continue
d = directions.index(direction)
if direction == directions[-1]:
continue
# trans2 = transforms.ToTensor()
# d = trans2(d)
values.append((img_data, d))
if i > limit:
break
i += 1
return values
def frame_to_image(frame, winner_id):
w = frame["board"]["height"]
h = frame["board"]["width"]
# The board representation is C x H x W where
# Channels are [body pos, food pos, self head pos, other head pos]
data = torch.zeros([4, h, w])
# body
current_channel = 0
for snake in frame["board"]["snakes"]:
body = snake["body"]
for i in range(len(body) - 1, 1, -1):
coord = body[i]
data[current_channel, coord["y"], coord["x"]] = i
# food
current_channel += 1
foods = frame["board"]["food"]
for coord in foods:
data[current_channel, coord["y"], coord["x"]] = 1
# head
my_head_channel = current_channel + 1
their_head_channel = current_channel + 2
for snake in frame["board"]["snakes"]:
head = snake["body"][0]
if snake["id"] == winner_id:
data[my_head_channel, head["y"], head["x"]] = 1
else:
data[their_head_channel, head["y"], head["x"]] = 1
return data
DIRECTION_DICT = {
"UP": 0,
"DOWN": 1,
"LEFT": 2,
"RIGHT": 3,
}
N_DIRECTIONS = len(DIRECTION_DICT)
def direction_to_index(direction: str) -> int:
"""Converts string representation of direction into integer
Parameters
----------
direction: str
"UP", "DOWN", "LEFT" or "RIGHT"
Returns
-------
direction_index: torch.LongTensor
Integer index of the direction
"""
return torch.Tensor([DIRECTION_DICT[direction]]).long()
def direction_to_onehot(direction: str) -> torch.Tensor:
"""Converts string representation of direction into onehot
Parameters
----------
direction: str
"UP", "DOWN", "LEFT" or "RIGHT"
Returns
-------
direction_onehot: torch.Tensor
Onehot representation of the direction
"""
direction_onehot = torch.zeros(N_DIRECTIONS)
direction_onehot[direction_to_index(direction)] = 1
return direction_onehot
def load_object(object_name, object_kwargs):
object_module, object_name = object_name.rsplit(".", 1)
object_module = importlib.import_module(object_module)
return getattr(object_module, object_name)(**object_kwargs)
``` |
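A short usage sketch of the direction helpers above (not part of the file), assuming the `ml_snek` package and its torch dependency are importable.
```python
from ml_snek.utils.utils import direction_to_index, direction_to_onehot

print(direction_to_index("LEFT"))    # tensor([2])
print(direction_to_onehot("LEFT"))   # tensor([0., 0., 1., 0.])
```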
{
"source": "joramwessels/torcs-client",
"score": 3
} |
#### File: joramwessels/torcs-client/basic_control.py
```python
from math import radians
import numpy as np
import torch
import ffnn_speed
import ffnn_steer
ANGLES = [90, 75, 60, 45, 30, 20, 15, 10, 5, 0, -5, -10, -15, -20, -30, -45, -60, -75, -90]
class BasicControl:
def __init__(self, steering_values):
""" Short description
Complete multiline
description of class
Args:
steering_values: ???
"""
self.brake_row = 0
self.speed = ffnn_speed.Speed(10)
self.steer = ffnn_steer.Steer(10)
self.steer.load_state_dict(torch.load("./steer.data"))
self.speed.load_state_dict(torch.load("./ffnn_speed.data"))
self.steering_values = steering_values
self.alphas = [radians(x) for x in ANGLES]
def deal_with_opponents(self, steer_pred, pedal, speed_x,
distance_from_center, opponents_new, opponents_delta):
""" Description
Args:
steer_pred: ?
pedal: ?
speed_x: ?
distance_from_center: ?
opponents_new: ?
opponents_delta: ?
Returns:
???
"""
# index 18 is in front
# index 35 in behind us
adjustment = 0.1
# if there are cars infront-left -> move to right
if opponents_new[17] < 10 or opponents_new[16] < 10 or opponents_new[15] < 10:
print("ADJUSTING SO NOT TO HIT")
steer_pred -= adjustment
if opponents_new[19] < 10 or opponents_new[20] < 10 or opponents_new[21] < 10:
print("ADJUSTING SO NOT TO HIT")
# move to left
steer_pred += adjustment
if opponents_new[18] < 50:
# we are on left side -> move right
if distance_from_center > 0:
steer_pred -= adjustment
# o.w. move left
else:
steer_pred += adjustment
if speed_x > 100:
# we are getting closer to the car in front (and we can't avoid it). We need to slow down a bit
if (opponents_delta[18] < 0 and opponents_new[18] < 20) or (opponents_delta[17] < 0 and opponents_new[17] < 4) or (opponents_delta[19] < 0 and opponents_new[19] < 4):
pedal -= 0.1
return steer_pred, pedal
def steer_decider(self, carstate):
""" Description
Args:
carstate: The full carstate
Returns:
Steering angle?
"""
alpha_index = np.argmax(carstate.distances_from_edge)
if is_straight_line(carstate=carstate, radians=self.alphas[alpha_index], factor=self.steering_values[4]):
return carstate.angle * 0.5
steering_function = lambda index, offset:\
(self.alphas[index-offset] * carstate.distances_from_edge[index-offset] \
+ self.alphas[index+offset] * carstate.distances_from_edge[index+offset]) \
/ (carstate.distances_from_edge[index+offset] \
+ carstate.distances_from_edge[index-offset])
steer = self.steering_values[0] * self.alphas[alpha_index]
for x in range(1, 4):
if alpha_index - x > -1 and alpha_index + x < len(self.alphas):
steer += self.steering_values[x]*steering_function(alpha_index, x)
return steer
def speed_decider(self, carstate, max_speed=120):
""" Description
Args:
carstate: The full carstate
max_speed: ???
Returns:
???
"""
# we predict speed and map that to pedal
x_in = ffnn_speed.carstate_to_variable(carstate)
target_speed = self.speed(x_in).data[0]
# we limit the speed
if target_speed >= max_speed:
target_speed = max_speed
pedal = 2/(1 + np.exp(carstate.speed_x - target_speed))-1
return pedal
def gear_decider(self, carstate):
""" Description
Args:
carstate: The full carstate
Returns:
The gear to shift to (int)
"""
gear = carstate.gear
rpm = carstate.rpm
# we do gears by hand
# up if {9500 9500 9500 9500 9000}
# down if {4000 6300 7000 7300 7300}
if gear == -1:
return 1
elif gear == 0:
if rpm >= 5000:
gear = 1
elif gear == 1:
if rpm >= 9500:
gear = 2
elif gear == 2:
if rpm >= 9500:
gear = 3
elif rpm <= 4000:
gear = 2
elif gear == 3:
if rpm >= 9500:
gear = 4
elif rpm <= 6300:
gear = 3
elif gear == 4:
if rpm >= 9500:
gear = 5
elif rpm <= 7000:
gear = 3
elif gear == 5:
if rpm >= 9000:
gear = 6
elif rpm <= 7300:
gear = 4
elif gear == 6:
if rpm <= 7300:
gear = 5
return gear
def disambiguate_pedal(self, pedal, accel_cap=0.5, break_cap=0.75, break_max_length=5):
""" Description
Args:
???
Returns:
The break and accelerator command values
"""
if pedal >= 0.0:
accelerator = pedal*accel_cap
brake = 0
else:
# we need to make sure that we don't brake too hard or for too long
self.brake_row += 1
if self.brake_row <= break_max_length:
brake = abs(pedal)*break_cap
else:
self.brake_row = 0
brake = 0
accelerator = 0
return brake, accelerator
def is_straight_line(carstate, radians, factor):
""" Decides whether ??? is a straight line
Args:
carstate: The full carstate
radians: ???
factor: ???
Returns:
A boolean indicating whether ???
"""
if abs(carstate.distance_from_center) < 0.75:
if radians == 0:
return True
if carstate.distances_from_edge[9] > 190:
return True
if carstate.distances_from_edge[9] > factor * carstate.speed_x:
return True
return False
```
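A stand-alone illustration (not part of the file) of the logistic speed-to-pedal mapping used in `speed_decider`: the pedal saturates near +1 well below the target speed, crosses 0 at the target, and approaches -1 when driving too fast. Numbers are arbitrary examples.
```python
import numpy as np

def pedal_from_speed(speed_x, target_speed):
    # same mapping as in BasicControl.speed_decider
    return 2 / (1 + np.exp(speed_x - target_speed)) - 1

for speed in (80, 115, 120, 125, 160):
    print(speed, round(float(pedal_from_speed(speed, target_speed=120)), 3))
# 80 -> ~1.0 (full throttle), 120 -> 0.0, 160 -> ~-1.0 (full brake)
```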
#### File: joramwessels/torcs-client/combined_driver.py
```python
from pytocl.driver import Driver
from pytocl.car import State, Command
from sys import stderr
from math import radians
from operator import sub
from driver_utils import *
from basic_control import BasicControl
from swarm import FeromoneTrail
from crisis_driver import CrisisDriver
from mlp import load_model
# - NOTE crash detection for swarm only checks for off road
# - NOTE collision detection for swarm might be too sensitive
# - TODO clear up swarm global parameters
# - TODO swarm debug output is piped to stderr
ENABLE_SWARM = True
ENABLE_CRISIS_DRIVER = True
ENABLE_NETWORK = False
# Neural network parameters
STR_MODELS = ["steering_model_1.pt", "steering_model_2.pt", "steering_model_2b.pt",
"steering_model_3.pt", "steering_model_3b.pt"]
MODEL_FILENAME = STR_MODELS[4]
# swarm metaparameters
swarm_pos_int = 50
swarm_spd_int = 20
swarm_spd0 = 0
swarm_spd_n = 20
swarm_expl_int = 40
class Final_Driver(Driver):
def __init__(self, steering_values, global_max_speed):
""" Short description
Multiline description on
details and usage
Args:
steering_values: ???
global_max_speed: ???
"""
super(Final_Driver, self).__init__()
self.iter = 0
self.basic_control = BasicControl(steering_values)
self.back_up_driver = CrisisDriver(logdata=False)
self.bad_counter = 0
self.lap_counter = 0
self.last_opponents = [0 for x in range(36)]
self.global_max_speed = global_max_speed
self.max_speed = global_max_speed
self.cummulative_time = 0
if ENABLE_SWARM:
self.swarm = FeromoneTrail(
swarm_pos_int, swarm_spd_int,
swarm_spd0, swarm_spd_n,
swarm_expl_int, self.global_max_speed)
self.crashed_in_last_frame = False
self.contact_in_last_frame = False
self.previous_frame_position = 0
if ENABLE_NETWORK:
self.steering_model = load_model(MODEL_FILENAME)
def drive(self, carstate: State) -> Command:
""" Description
Args:
carstate: All parameters packed in a State object
Returns:
command: The next move packed in a Command object
"""
self.iter += 1
self.back_up_driver.update_status(carstate)
# trackers
self.update_trackers(carstate)
if PRINT_STATE:# and (self.iter % PRINT_CYCLE_INTERVAL) == 0:
self.print_trackers(carstate, r=True)
# crash and collision detection for swarm
if ENABLE_SWARM:
if self.back_up_driver.needs_help or self.back_up_driver.is_off_road:
if not self.crashed_in_last_frame:
debug(self.iter, "SWARM: crashed")
self.crashed_in_last_frame = True
for dist in carstate.opponents:
if dist == 0:
self.contact_in_last_frame = True
# crisis handling
if ENABLE_CRISIS_DRIVER:
if self.back_up_driver.is_in_control:
return self.back_up_driver.drive(carstate)
elif self.back_up_driver.needs_help:
self.back_up_driver.pass_control(carstate)
return self.back_up_driver.drive(carstate)
# since the data and python's values differ we need to adjust them
try:
carstate.angle = radians(carstate.angle)
carstate.speed_x = carstate.speed_x*3.6
command = self.make_next_command(carstate)
except Exception as e:
err(self.iter, str(e))
command = self.back_up_driver.driver.drive(carstate)
return command
def make_next_command(self, carstate):
""" Description
Args:
carstate: The full carstate object as passed to Driver()
Returns:
command: The command object to pass back to the server
"""
# checking in on the swarm
position = carstate.distance_from_start
position = int(position - (position % self.swarm.pos_int))
new_frame = position > (self.previous_frame_position + self.swarm.pos_int)
new_lap = self.previous_frame_position > (position + self.swarm.pos_int)
if ENABLE_SWARM and (new_frame or new_lap):
self.max_speed = self.swarm.check_in(
position,
carstate.speed_x,
self.crashed_in_last_frame,
self.contact_in_last_frame)
self.crashed_in_last_frame = False
self.contact_in_last_frame = False
self.previous_frame_position = position
err(self.iter, "SWARM: pos=%i, max_speed=%i" %(position, self.max_speed))
# basic predictions
if ENABLE_NETWORK:
steer_pred = self.steering_model.predict([carstate.angle, carstate.speed_x]
+ list(carstate.distances_from_edge)
+ [carstate.distance_from_center])
steer_pred = steer_pred[0]
else:
steer_pred = self.basic_control.steer_decider(carstate)
gear = self.basic_control.gear_decider(carstate)
pedal = self.basic_control.speed_decider(carstate, max_speed=self.max_speed)
# making sure we don't drive at people
opponents_deltas = list(map(sub, carstate.opponents, self.last_opponents))
steer_pred, pedal = self.basic_control.deal_with_opponents(steer_pred,
pedal,
carstate.speed_x,
carstate.distance_from_center,
carstate.opponents,
opponents_deltas)
# if too fast descelerate to max speed
if carstate.speed_x > self.max_speed:
pedal = 0.0
err(self.iter, "MAIN: capping speed")
# disambiguating pedal with smoothing
brake, accel = self.basic_control.disambiguate_pedal(pedal, accel_cap=1.0)
# debug output
if PRINT_COMMAND and self.iter % PRINT_CYCLE_INTERVAL == 0:
print("Executing comand: gear=%.2f, acc=%.2f," %(gear, accel),
"break=%.2f, steering=%.2f" %(brake, steer_pred))
# command construction
command = Command()
command.brake = brake
command.accelerator = accel
command.steering = steer_pred
command.gear = gear
if command.steering > 0.10:
debug(self.iter, "BASIC: turning left")
elif command.steering < -0.10:
debug(self.iter, "BASIC: turning right")
return command
def update_trackers(self, carstate):
""" Updates info about the race """
self.iter += 1
if abs(carstate.current_lap_time) < 0.020:
self.lap_counter += 1
self.cummulative_time += carstate.last_lap_time
def print_trackers(self, carstate, r=False):
""" Prints info on the race """
line_end = '\r' if r else '\n'
print(" Lap=%i CurLapTime=%.2f dist=%.2f time=%.2f"
%(self.lap_counter,
carstate.current_lap_time,
carstate.distance_raced,
self.cummulative_time + carstate.current_lap_time)
, end=line_end)
```
#### File: joramwessels/torcs-client/driver_utils.py
```python
from sys import stderr
from numpy import sign
# as stated by Torcs documentation
MAX_WHEEL_ROTATION = 21
# confusing polarities
STR_R, STR_L = -1, 1
ANG_R, ANG_L = 1, -1
DFC_R, DFC_L = -1, 1
# printing
ENABLE_DEBUG_INFO = True
PRINT_CYCLE_INTERVAL = 50 # freqency of print output in game cycles
PRINT_STATE = True
PRINT_COMMAND = False
def to_ang(ang):
""" Steers towards the road angle
Args:
ang: The angle of the car with the road
Returns:
The angle to steer in
"""
if sign(ang) == ANG_R:
return STR_L
elif sign(ang) == ANG_L:
return STR_R
else:
return 0
def away_from_ang(ang):
""" Steers away from the road angle
Args:
ang: The angle of the car with the road
Returns:
The angle to steer in
"""
return -to_ang(ang)
def debug(iter, *args):
""" prints debug info to stderr """
if iter % PRINT_CYCLE_INTERVAL == 0:
err(iter, *args)
def err(iter, *args):
if ENABLE_DEBUG_INFO:
spc = 6-len(str(iter))
ovwr = 50 - len(' '.join([str(x) for x in args]))
print(iter, ' '*spc, *args, ' '*ovwr, file=stderr)
```
#### File: joramwessels/torcs-client/ffnn_steer.py
```python
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
import sys
import numpy as np
import argparse
import driver_support
import math
from os import listdir
from os.path import isfile, join
class Steer(nn.Module):
def __init__(self, hidden_dimension):
super(Steer, self).__init__()
n_states = 22
n_actions = 1
self.layer_1 = nn.Linear(n_states, hidden_dimension)
self.non_lin = nn.Sigmoid()
self.layer_2 = nn.Linear(hidden_dimension, n_actions)
def forward(self, inputs):
out = self.layer_1(inputs)
out = self.non_lin(out)
out = self.layer_2(out)
return out
def carstate_to_variable(carstate):
# y=speed, x=angle, distance*19, distToMiddle
return Variable(torch.FloatTensor([math.radians(carstate.angle), carstate.speed_x] + list(carstate.distances_from_edge) + [carstate.distance_from_center]), requires_grad=True)
def create_model(out_file, training_folder, learning_rate, epochs, hidden_dimension):
# Read in the data
training = []
for file_in in [join(training_folder, f) for f in listdir(training_folder) if isfile(join(training_folder, f))]:
training += list(driver_support.read_lliaw_dataset_steer_angle_speed_dist_middle(file_in))
model = Steer(hidden_dimension)
print(model)
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
loss = nn.MSELoss()
for ITER in range(epochs):
train_loss = 0.0
start = time.time()
for y_true, state in training:
# forward pass
optimizer.zero_grad()
in_state = Variable(torch.FloatTensor(state))
y_pred = model(in_state)
y_true = Variable(torch.FloatTensor(y_true))
output = loss(y_pred, y_true)
train_loss += output.data[0]
# backward pass
output.backward()
# update weights
optimizer.step()
print("last prediction made:", y_pred, y_true)
print("iter %r: train loss/action=%.4f, time=%.2fs" %(ITER, train_loss/len(training), time.time()-start))
torch.save(model.state_dict(), out_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int)
parser.add_argument('--hidden', type=int)
parser.add_argument('--learn', type=float)
parser.add_argument('--in_file', type=str)
parser.add_argument('--out_file', type=str)
args = parser.parse_args()
create_model(args.out_file, args.in_file, args.learn, args.epochs, args.hidden)
if __name__ == "__main__":
main()
```
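A hedged inference sketch (not in the file): load a trained `Steer` network and query it with one 22-dimensional input — angle, speed, 19 track-edge distances and the distance from the track centre — matching `carstate_to_variable` above. The checkpoint name mirrors the one loaded in basic_control.py; hidden size 10 is assumed to match training.
```python
import torch
from torch.autograd import Variable

from ffnn_steer import Steer

model = Steer(hidden_dimension=10)
model.load_state_dict(torch.load("./steer.data"))

# [angle, speed_x] + 19 edge distances + distance_from_center (values are arbitrary)
x = Variable(torch.FloatTensor([0.0, 80.0] + [50.0] * 19 + [0.1]))
steering = model(x)
print(steering.data[0])
```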
#### File: joramwessels/torcs-client/mlp_driver.py
```python
from pytocl.driver import Driver
from pytocl.car import State, Command
import mlp
class MyDriver(Driver):
def __init__(self, model_file="mlp_100x100.pt"):
mlp.use_cuda = False
self.model = mlp.load_model(model_file)
self.it = 0
def drive(self, carstate):
self.it += 1
x = [carstate.angle, carstate.speed_x,
carstate.speed_y, carstate.speed_z] + \
list(carstate.distances_from_edge) + \
[carstate.distance_from_center]
pred_y = list(self.model.predict(x).data)[0]
command = Command()
command.accelerator = pred_y[0]
command.brake = pred_y[1]
command.steering = pred_y[2]
gear_flt = pred_y[3] if self.it > 750 else self.it/250.0
command.gear = min(5, max(1, int(gear_flt + 0.5)))
print(self.it,"acc: %.2f, brk: %.2f, ste: %.2f, gea: %.2f"
%(command.accelerator, command.brake,
command.steering, gear_flt), end='\r')
return command
```
#### File: joramwessels/torcs-client/torcs_tournament.py
```python
import logging
import os
import re
import pwd
import csv
import time
import shutil
import psutil
import pathlib
import datetime
import subprocess
from collections import OrderedDict, abc
import elo
import yaml
from bs4 import BeautifulSoup
DROPBOX_DEBUG = logging.DEBUG - 1
logger = logging.getLogger(None if __name__ == '__main__' else __name__)
def path_rel_to_dir(path, directory):
if not os.path.isabs(path):
path = os.path.join(directory, path)
return path
def really_running(proc):
"""Check whether a process is running _and_ isn't a zombie"""
return proc.is_running() and proc.status() != psutil.STATUS_ZOMBIE
class OrderedLoader(yaml.Loader):
def construct_mapping(self, node, deep=False):
# self.flatten_mapping(node)
return OrderedDict(self.construct_pairs(node, deep))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
OrderedLoader.construct_mapping
)
class ParseError(Exception):
pass
class Player(object):
"""
Container for player information.
Every argument of `start_command` will be formatted using
`format(port=<value>)`
`start_command` is issued with `working_dir` as working directory and
`process_owner` as user. If `process_owner` is None, `token` will be used.
The filenames `stdout` and `stderr` are relative to `output_dir`.
"""
def __init__(self, token, working_dir, rating=None,
start_command=['./start.sh', '-p', '{port}'],
output_dir='./output/',
stdout='./{timestamp}-stdout.txt',
stderr='./{timestamp}-stderr.txt',
message_file='./current_rating.txt',
rating_message="Your current rating is: {rating}",
rank_message="You are ranked {rank} out of {total}",
process_owner=None):
self.token = token
self.working_dir = working_dir
if rating is not None:
self.rating = elo.RATING_CLASS(rating)
else:
self.init_rating()
self.start_command = start_command
self.output_dir = path_rel_to_dir(output_dir, self.working_dir)
self.stdout = path_rel_to_dir(stdout, self.output_dir)
self.stderr = path_rel_to_dir(stderr, self.output_dir)
self.message_file = path_rel_to_dir(message_file, self.output_dir)
self.rating_message = rating_message
self.rank_message = rank_message
self.process_owner = process_owner \
if process_owner is not None \
else self.token
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
def __str__(self):
return self.__class__.__name__ + "({self.token!r}, " \
"{self.rating!r}" \
")".format(self=self)
def __repr__(self):
return self.__class__.__name__ + "(" \
"{self.token!r}, " \
"{self.working_dir!r}, " \
"{self.rating!r}, " \
"{self.start_command!r}, " \
"{self.output_dir!r}, " \
"{self.stdout!r}, " \
"{self.stderr!r}, " \
"{self.message_file!r}, " \
"{self.rating_message!r}, " \
"{self.process_owner!r}" \
")".format(self=self)
def init_rating(self):
self.rating = elo.RATING_CLASS(elo.INITIAL)
class Rater(object):
def __init__(self, players=(), filename=None,
ignore_unknown_players=False):
self.player_map = {}
for player in players:
self.add_player(player)
self.filename = filename
self.ignore_unknown_players = ignore_unknown_players
if self.filename is not None and os.path.exists(self.filename):
self.read_file()
def add_player(self, player):
"""Add a player to this rater."""
if player.token in self.player_map:
raise ValueError(
"A token may only be specified once. Token: {}".format(
player.token
)
)
self.player_map[player.token] = player
def filename_check(self, filename=None):
if filename is None:
if self.filename is None:
raise ValueError(
"Please specify a filename as argument or assign it to"
" `self.filename`."
)
else:
filename = self.filename
return filename
def read_file(self, filename=None):
filename = self.filename_check(filename)
with open(filename) as fd:
self.set_ratings(map(self.clean_line, csv.reader(fd)))
def set_ratings(self, iterable):
tokens = set()
for line in iterable:
token = line[0]
if token in tokens:
raise ValueError(
"A token may only be specified once. Token: {}".format(
token
)
)
tokens.add(token)
if len(line) > 2:
raise ValueError(
"No extra information next to a token and the desired "
"rating should be specified: {}".format(line)
)
if len(line) == 2:
if token in self.player_map:
self.player_map[token].rating = elo.RATING_CLASS(line[1])
elif not self.ignore_unknown_players:
raise ValueError(
"Rating specified for unknown player: {}".format(token)
)
@staticmethod
def clean_line(iterable):
li = list(iterable)
if len(li) != 1 and len(li) != 2:
raise ValueError(
"A ratings file should only contain lines with one or two "
"values, got {}".format(li)
)
if len(li) == 2:
try:
li[1] = elo.RATING_CLASS(li[1])
except ValueError as error:
raise ValueError(
"The second value of a rating line should be "
"interpretable as {}. I received the following error "
"while casting:\n\t{}".format(
elo.RATING_CLASS.__name__,
error
)
)
return li
def save_ratings(self, filename=None):
"""
Save the ratings of all players to a file.
If a filename is specified, that file is used, otherwise
`self.filename` is used. If neither is specified, a ValueError is
raised.
"""
filename = self.filename_check(filename)
logger.info("Saving ratings in {}".format(filename))
with open(filename, 'w') as fd:
csv.writer(fd).writerows(
sorted(
((p.token, p.rating) for p in self.player_map.values()),
key=lambda p: p[1]
)
)
@staticmethod
def adjust_all(ranking):
"""
Adjust the ratings of given Players according to the ranked results.
In a ranking, every player is assumed to have beaten all players ranked after it.
"""
ranking = list(ranking)
# Calculate new ratings
new_ratings = [
elo.rate(
player.rating,
[
((pi < oi), opponent.rating)
for oi, opponent in enumerate(ranking)
if opponent is not player
]
)
for pi, player in enumerate(ranking)
]
# Save new ratings
for player, rating in zip(ranking, new_ratings):
player.rating = rating
def restart(self):
for player in self.player_map.values():
player.init_rating()
class Controller(object):
def __init__(self, rater, queue, torcs_config_file,
server_stdout='{timestamp}-server_out.txt',
server_stderr='{timestamp}-server_err.txt',
separate_player_uid=False,
set_file_owner=False,
set_file_mode=False,
rater_backup_filename=None,
result_filename_format="{driver} - {base}",
timestamp_format='%Y-%m-%d-%H.%M',
result_path='~/.torcs/results/',
torcs_command=['torcs', '-r', '{config_file}'],
driver_to_port=OrderedDict([
('scr_server 1', 3001),
('scr_server 2', 3002),
('scr_server 3', 3003),
('scr_server 4', 3004),
('scr_server 5', 3005),
('scr_server 6', 3006),
('scr_server 7', 3007),
('scr_server 8', 3008),
('scr_server 9', 3009),
('scr_server 10', 3010),
]),
raise_on_too_fast_completion=True,
torcs_min_time=1,
torcs_child_wait=0.5,
shutdown_wait=1,
crash_check_wait=0.2,
file_mode=0o700):
"""
Orchestrate the races and save the ratings.
When the rating is left out of the ratings file for a token, it is
assigned the default rating, which will be saved to the same file
when running `save_ratings`.
N.B. `~` is only expanded to the user directory in `result_path` at
initialisation of the controller.
"""
self.rater = rater
self.queue = queue
self.torcs_config_file = torcs_config_file
self.server_stdout = server_stdout
self.server_stderr = server_stderr
self.separate_player_uid = separate_player_uid
self.set_file_owner = set_file_owner
self.set_file_mode = set_file_mode
self.rater_backup_filename = rater_backup_filename
self.result_filename_format = result_filename_format
self.timestamp_format = timestamp_format
self.result_path = os.path.expanduser(result_path)
self.torcs_command = torcs_command
self.driver_to_port = driver_to_port
self.raise_on_too_fast_completion = raise_on_too_fast_completion
self.torcs_min_time = torcs_min_time
self.torcs_child_wait = torcs_child_wait
self.shutdown_wait = shutdown_wait
self.crash_check_wait = crash_check_wait
self.file_mode = file_mode
logger.debug("Result path: {}".format(self.result_path))
# Read drivers from config
self.drivers = self.read_lineup(self.torcs_config_file)
def timestamp(self):
return datetime.datetime.now().strftime(self.timestamp_format)
@staticmethod
def rank_text(rank):
if rank == 0:
return '1st'
elif rank == 1:
return '2nd'
elif rank == 2:
return '3rd'
else:
return str(rank + 1) + 'th'
@staticmethod
def read_ranking(results_file):
"""
Return a ranked list of driver names read from the given results file.
NB. Driver names are _not_ tokens. One should first look up which token
corresponds with which driver name.
"""
with open(results_file) as fd:
soup = BeautifulSoup(fd, 'xml')
result_soup = soup.find('section', attrs={'name': 'Results'})
rank_soup = result_soup.find('section', attrs={'name': 'Rank'})
ranks = [
(
int(section['name']),
section.find('attstr', attrs={'name': 'name'})['val']
)
for section in rank_soup.findAll('section')
]
return list(zip(*sorted(ranks)))[1]
@staticmethod
def read_lineup(torcs_config_file):
with open(torcs_config_file) as fd:
soup = BeautifulSoup(fd, 'xml')
drivers_sec = soup.find('section', attrs={'name': 'Drivers'})
drivers = []
for sec in drivers_sec.findAll('section'):
tag, attrs = 'attstr', {'name': 'module'}
module = sec.find(tag, attrs=attrs)
if module is None:
raise ParseError(
"Error parsing {file}: expected a {tag} tag with the "
"following attributes: {attrs!r}".format(
file=torcs_config_file,
tag=tag,
attrs=attrs
)
)
expected = 'scr_server'
if module.get('val', Exception()) != expected:
raise ParseError(
"Error parsing {file}: all drivers are expected to be the "
"'{expected}' module.".format(
file=torcs_config_file,
expected=expected
)
)
tag, attrs = 'attnum', {'name': 'idx'}
idx = sec.find(tag, attrs=attrs)
if idx is None:
raise ParseError(
"Error parsing {file}: expected a {tag} tag with the "
"following attributes: {attrs!r}".format(
file=torcs_config_file,
tag=tag,
attrs=attrs
)
)
val = idx.get('val', None)
if val is None:
raise ParseError(
"Error parsing {file}: expected {tag} to have the "
"attribute {attr}.".format(
file=torcs_config_file,
tag=tag,
attr='val',
)
)
drivers.append((sec['name'], val))
# I now have a list of (rank, id) pairs
# Somehow, the number in the name of the scr_server driver is one
# larger that the `idx` of the driver.
return [
'scr_server {}'.format(int(idx) + 1)
for _, idx in sorted(drivers)
]
def restart(self):
"""Restart the tournament, making all ratings equal."""
self.rater.restart()
def race_and_save(self, simulate=False):
"""
Run a race (see `Controller.race`) and save the ratings.
"""
self.race(simulate=simulate)
self.rater.save_ratings()
def race(self, simulate=False):
"""
Run a race
Automatically determine the number of players to be raced and ask the
queue which players are next. Race the players, save the results and
update the queue.
"""
players = self.queue.first_n(len(self.drivers))
logger.info("Racing: {}".format(', '.join(
repr(player.token) for player in players
)))
self.race_once(players, simulate=simulate)
self.queue.requeue(players)
def race_tokens(self, tokens, simulate=False):
return self.race_once(
map(self.rater.player_map.get, tokens),
simulate=simulate
)
def race_once(self, players, simulate=False):
"""
Run one race with TORCS and the given players.
Also make a backup of the ratings if `self.rater_backup_filename` is
not None.
NB. Please make sure the number of players given matches the specified
number of players in the configuration file of this Controller.
The output can be found under:
<torcs installation directory>/results
"""
players = list(players)
if len(self.drivers) != len(players):
raise ValueError(
"{nplay} players where given, but {file} specifies {ndriv} "
"drivers".format(
nplay=len(players),
ndriv=len(self.drivers),
file=self.torcs_config_file
)
)
driver_to_player = OrderedDict(zip(self.drivers, players))
open_files = []
processes = []
try:
# Start server
server_stdout = open(
self.server_stdout.format(timestamp=self.timestamp()),
'w'
)
open_files.append(server_stdout)
server_stderr = open(
self.server_stderr.format(timestamp=self.timestamp()),
'w'
)
open_files.append(server_stderr)
logger.info("Starting TORCS...")
if simulate:
logger.warning(
"This is a simulation! No child processes are started."
)
else:
logger.debug(
"TORCS config to use: {}".format(self.torcs_config_file)
)
config_file = os.path.abspath(self.torcs_config_file)
logger.debug("TORCS config to use: {}".format(config_file))
command = list(map(
lambda s: s.format(
config_file=config_file
),
self.torcs_command
))
logger.debug("TORCS command to be run: {}".format(command))
server_process = psutil.Popen(
command,
stdout=server_stdout,
stderr=server_stderr,
)
processes.append(server_process)
# TORCS starts a child process, which doesn't terminate
# automatically if `server_process` is terminated or crashes.
time.sleep(self.torcs_child_wait)
children = server_process.children()
logger.debug("TORCS server children: {}".format(children))
processes.extend(children)
# Start players
logger.info("Starting players...")
for driver, player in driver_to_player.items():
stdout = open(
player.stdout.format(timestamp=self.timestamp()),
'w'
)
open_files.append(stdout)
stderr = open(
player.stderr.format(timestamp=self.timestamp()),
'w'
)
open_files.append(stderr)
# Set the ownership of the files
if self.set_file_owner:
self.change_owner(player)
if self.set_file_mode:
self.change_mode(player)
if simulate:
# Always simulate these functions, just to be sure they
# work
self.get_change_user_fn(player)
self.get_player_env(player)
elif self.separate_player_uid:
processes.append(psutil.Popen(
map(
lambda s: s.format(
port=self.driver_to_port[driver]
),
player.start_command
),
stdout=stdout,
stderr=stderr,
preexec_fn=self.get_change_user_fn(player),
cwd=player.working_dir,
env=self.get_player_env(player)
))
else:
processes.append(psutil.Popen(
map(
lambda s: s.format(
port=self.driver_to_port[driver]
),
player.start_command
),
stdout=stdout,
stderr=stderr,
cwd=player.working_dir,
))
logger.debug("Started {}".format(player))
time.sleep(self.crash_check_wait)
# Check no one crashed in the mean time
for proc in processes:
if not really_running(proc):
name = proc.name() if hasattr(proc, 'name') else proc
raise subprocess.CalledProcessError(
proc.poll() if hasattr(proc, 'poll') else 0,
list(proc.args) or name
)
# Wait for server
logger.info("Waiting for TORCS to finish...")
start_time = time.time()
if not simulate:
server_process.wait()
end_time = time.time()
# Time TORCS ran in seconds
diff_time = end_time - start_time
if not simulate and diff_time < self.torcs_min_time:
logger.warning(
"TORCS only ran for {:.2f} seconds".format(diff_time)
)
if self.raise_on_too_fast_completion:
raise subprocess.SubprocessError(
"TORCS only took {:.2f} seconds to complete".format(
diff_time
)
)
logger.debug("Finished!")
# Check exit status of TORCS
# However, even if something goes wrong, the exit status is 0,
# so I can't know if something went wrong.
# logger.debug("really_running(server_process): {}".format(
# really_running(server_process)
# ))
# logger.debug("server_process.returncode: {}".format(
# server_process.returncode
# ))
# if server_process.returncode:
# raise subprocess.CalledProcessError(
# proc.returncode,
# proc.args
# )
except:
logger.error("An error occurred, trying to stop gracefully...")
raise
finally:
# Exit running processes
if not simulate:
# Wait a second to give the processes some time
time.sleep(self.shutdown_wait)
# First be nice
for proc in processes:
if really_running(proc):
logger.info("Terminating {}".format(proc))
proc.terminate()
# Wait a second to give the processes some time
time.sleep(self.shutdown_wait)
# Time's up
for proc in processes:
if really_running(proc):
logger.warning("Killing {}".format(proc))
proc.kill()
# Wait a second to give the processes some time
time.sleep(self.shutdown_wait)
# Double check
for proc in processes:
if really_running(proc):
logger.error(
"The following process could not be killed: {}"
.format(proc.cmdline())
)
# Close all open files
for fd in open_files:
logger.debug("Closing {}".format(fd.name))
try:
fd.close()
except Exception as e:
logger.error(e)
logger.info("Closed all files and processes!")
# Give the players the server output
for player in players:
shutil.copyfile(
server_stdout.name,
os.path.join(
player.output_dir,
os.path.basename(server_stdout.name)
)
)
shutil.copyfile(
server_stderr.name,
os.path.join(
player.output_dir,
os.path.basename(server_stderr.name)
)
)
# End of `finally` clause
# Find the correct results file
logger.debug("Result path: {}".format(self.result_path))
out_dir = os.path.join(
self.result_path,
# remove head path and extension
'.'.join(os.path.split(self.torcs_config_file)[1].split('.')[:-1])
)
out_base = sorted(os.listdir(out_dir))[-1]
out_file = os.path.join(
out_dir,
out_base
)
# Give the players the results file
for driver, player in driver_to_player.items():
shutil.copyfile(
out_file,
os.path.join(
player.output_dir,
self.result_filename_format.format(
driver=driver,
base=out_base
)
)
)
# Update ratings according to ranking
ranked_drivers = self.read_ranking(out_file)
self.rater.adjust_all(map(driver_to_player.get, ranked_drivers))
# Make a backup if self.rater_backup_filename is given
if self.rater_backup_filename is not None:
backup_filename = self.rater_backup_filename.format(
timestamp=self.timestamp()
)
# logger.info("Backing up ratings in {}".format(backup_filename))
self.rater.save_ratings(
backup_filename
)
# Tell players their own rating and rank
sorted_players = sorted(
self.rater.player_map.values(),
key=lambda p: p.rating,
reverse=True
)
total = len(sorted_players)
for rank, player in enumerate(sorted_players):
with open(player.message_file, 'w') as fd:
fd.write(player.rating_message.format(rating=player.rating))
fd.write('\n')
fd.write(
player.rank_message.format(
rank=self.rank_text(rank),
total=total
)
)
fd.write('\n')
def change_owner(self, player):
"""
Make `player.process_owner` the owner of all files in
`player.working_dir`
"""
pw_record = pwd.getpwnam(player.process_owner)
logger.debug(
"Changing file ownership for {}".format(player.token)
)
for dirpath, _, filenames in os.walk(player.working_dir):
# Change directory ownership
os.chown(dirpath, pw_record.pw_uid, pw_record.pw_gid)
# Change file ownership
for filename in filenames:
os.chown(
os.path.join(dirpath, filename),
pw_record.pw_uid,
pw_record.pw_gid
)
def change_mode(self, player, mode=None):
"""
Set the file mode of every file and directory in
`player.working_dir` to `mode` (defaults to `self.file_mode`)
"""
if mode is None:
mode = self.file_mode
logger.debug(
"Changing file mode for {}".format(player.token)
)
for dirpath, _, filenames in os.walk(player.working_dir):
# Change directory mode
os.chmod(dirpath, mode)
# Change file mode
for filename in filenames:
os.chmod(os.path.join(dirpath, filename), mode)
@staticmethod
def get_change_user_fn(player):
pw_record = pwd.getpwnam(player.process_owner)
def change_user():
logger.debug(
"Starting demotion. UID: {uid}, GID: {gid}".format(
uid=os.getuid(),
gid=os.getgid()
)
)
try:
logger.debug("Trying to set gid...")
os.setgid(pw_record.pw_gid)
logger.debug("Trying to set uid...")
os.setuid(pw_record.pw_uid)
except Exception as e:
logger.error(e)
raise
logger.debug(
"Finished demotion. UID: {uid}, GID: {gid}".format(
uid=os.getuid(),
gid=os.getgid()
)
)
return change_user
@staticmethod
def get_player_env(player):
# Info from https://stackoverflow.com/questions/1770209/run-child-processes-as-different-user-from-a-long-running-process/6037494#6037494 # NOQA
pw_record = pwd.getpwnam(player.process_owner)
env = os.environ.copy()
env['LOGNAME'] = env['USER'] = pw_record.pw_name
env['HOME'] = pw_record.pw_dir
logger.debug("ENV PWD: {}".format(env.get('PWD', None)))
env['PWD'] = player.working_dir
logger.debug("Set PWD to: {!r}".format(env['PWD']))
logger.debug("PATH: {}".format(env['PATH']))
return env
@classmethod
def load_config(cls, config_file, extra_config={}):
"""
Load a controller from the given config file.
NB. Only the first layer of `extra_config` is merged, everything else
is overwritten, e.g.:
original_config = {
'test': {
'test-one': 'hello'
'test-two': {
'test-two-one': 'bye'
}
}
}
extra_config = {
'test': {
'test-two': {
'test-two-two': 'override'
}
'test-three': 'added'
}
}
results in:
config = {
'test': {
'test-one': 'hello'
'test-two': {
'test-two-two': 'override'
}
'test-three': 'added'
}
}
"""
error_regex = re.compile(
r"__init__\(\) got an unexpected keyword argument '(\w+)'"
)
with open(config_file) as fd:
config = yaml.load(fd, OrderedLoader)
for key, value in extra_config.items():
if isinstance(value, abc.Mapping):
cur_conf = config.setdefault(key, {})
cur_conf.update(value)
else:
config[key] = value
logger.debug("Config: {}".format(config))
try:
rater = cls.load_rater(config)
fbq = cls.load_fbq(config, rater.player_map.values())
controller = cls(rater, fbq, **config.get('controller', {}))
except TypeError as e:
match = error_regex.fullmatch(e.args[0])
if match is not None:
config_key = match.groups()[0]
logger.debug("Match: {}".format(config_key))
raise ValueError(
"Unexpected configuration key in {filename}: {key!r}"
.format(filename=config_file, key=config_key)
) from e
else:
logger.debug("No match...")
raise
return controller
@classmethod
def load_rater(cls, config_dic):
players = cls.load_players(config_dic)
rater = Rater(players, **config_dic.get('rater', {}))
return rater
@staticmethod
def load_players(config_dic):
key = 'players'
if key not in config_dic:
raise ValueError(
"No players specified! Expected a {!r} key in the"
" configuration file.".format(key)
)
players = config_dic[key]
if not isinstance(players, abc.Mapping):
# If it's not a mapping, I'm assuming I can open it.
the_exception = TypeError(
"Expected {key!r} to point to a {{token: config}} mapping or"
" a path to a .yml file containing a {{token: config}}"
" mapping. Instead I found: {players!r}".format(
key=key,
players=players
)
)
fd = None
try:
fd = open(players)
except Exception as e:
raise the_exception from e
else:
players = yaml.load(fd, OrderedLoader)
finally:
if fd is not None:
fd.close()
# logger.debug("Closed players config!")
if not isinstance(players, abc.Mapping):
raise the_exception
return [
Player(token, **player_conf)
for token, player_conf in players.items()
]
@staticmethod
def load_fbq(config_dic, players=()):
return FileBasedQueue(players, **config_dic.get('queue', {}))
class DropboxDisablingController(Controller):
def __init__(self, *args, dropbox_start_command=['dropbox', 'start'],
dropbox_stop_command=['dropbox', 'stop'], start_dropbox=False,
stop_dropbox=False, **kwargs):
self.dropbox_start_command = dropbox_start_command
self.dropbox_stop_command = dropbox_stop_command
self.start_dropbox = start_dropbox
self.stop_dropbox = stop_dropbox
super(DropboxDisablingController, self).__init__(*args, **kwargs)
def race_once(self, *args, **kwargs):
"""Disable Dropbox before racing and start it again afterwards."""
try:
# Try to disable Dropbox
# The catch is that the return status of the Dropbox control script
# is always 0, even if something went wrong...
if self.stop_dropbox:
logger.info("Stopping Dropbox...")
completed = subprocess.run(
self.dropbox_stop_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
logger.info("Dropbox says:\n{}".format(
completed.stdout.decode()
))
del completed
# Race
return super(DropboxDisablingController, self).race_once(
*args,
**kwargs
)
finally:
if self.start_dropbox:
# Enable Dropbox
logger.info("Starting Dropbox...")
# Somehow stderr captures the output of the started Dropbox
# daemon. However, capturing it isn't an option because the
# daemon doesn't stop, which means the stream will hang. Thus
# if you want to see its output, we'll just leave it as is,
# otherwise we'll squelch the daemon's output.
stderr = None if logger.getEffectiveLevel() <= DROPBOX_DEBUG \
else subprocess.DEVNULL
completed = subprocess.run(
self.dropbox_start_command,
stdout=subprocess.PIPE,
stderr=stderr
)
logger.info("Dropbox says:\n{}".format(completed.stdout.decode()))
del completed
class FileBasedQueue(object):
"""
Queue players according to the last modified time of a specifically named
file in their `working_dir`.
"""
def __init__(self, players, filename='start.sh'):
self.filename = filename
self.players = list(players)
@staticmethod
def touch(filename):
"""
Touch a file.
I.E. create it if it does not exist or change the last modified time
to the current time if it does.
"""
logger.debug("Touching: {}".format(filename))
pathlib.Path(filename).touch()
logger.debug("Touched!")
@staticmethod
def get_last_modified(filename):
modified_time = os.path.getmtime(filename)
logger.debug("Filename: {}".format(filename))
logger.debug("Modified time: {}".format(modified_time))
return modified_time
def get_filename(self, player):
"""Get the full path to the queue file of a player"""
return os.path.join(
player.working_dir,
self.filename
)
def first_n(self, n):
"""
Get the `n` players that are first in line
"""
return sorted(
self.players,
key=lambda p: self.get_last_modified(self.get_filename(p)),
# reverse=True,
)[:n]
def requeue(self, players):
"""
Put the given players at the end of the queue
In this case this is done by touching their respective queue files
in the order the players are passed.
"""
for player in players:
self.touch(self.get_filename(player))
def log_level_type(string):
try:
value = int(string)
except ValueError:
value = string
return value
if __name__ == '__main__':
# Parse command line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('config_file', help="YAML file with configuration")
parser.add_argument('-l', '--level', default='INFO', type=log_level_type,
help="Logging level to use")
parser.add_argument(
'-s',
'--simulate',
action='store_true',
help="Attempts to mimic a full run without starting child processes."
" May fail if no old TORCS output files are present in the expected"
" directory.")
parser.add_argument(
'--start-dropbox',
action='store_true',
help="Start Dropbox again after the race."
)
parser.add_argument(
'--stop-dropbox',
action='store_true',
help="Stop Dropbox before the race. Implies --start-dropbox."
)
args = parser.parse_args()
# Initialise logging
logging.basicConfig(level=args.level)
extra_config = {}
if args.stop_dropbox:
control_config = extra_config.setdefault('controller', {})
control_config['stop_dropbox'] = True
if args.start_dropbox or args.stop_dropbox:
control_config = extra_config.setdefault('controller', {})
control_config['start_dropbox'] = True
# Race
# controller = Controller.load_config(args.config_file)
controller = DropboxDisablingController.load_config(
args.config_file,
extra_config
)
controller.race_and_save(simulate=args.simulate)
logger.info("Done!")
``` |
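A hedged sketch (not part of the file) of the Elo bookkeeping in isolation: two `Player` objects in temporary working directories are rated after a single hypothetical race. It assumes the repository's `elo` helper module and the other imports at the top of the file are available.
```python
import tempfile

from torcs_tournament import Player, Rater

with tempfile.TemporaryDirectory() as d1, tempfile.TemporaryDirectory() as d2:
    alice = Player('alice', d1)   # gets the default initial rating
    bob = Player('bob', d2)
    rater = Rater([alice, bob])
    # alice finished ahead of bob in this hypothetical race
    rater.adjust_all([alice, bob])
    print(alice.rating, bob.rating)
```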
{
"source": "jorana/DirectDmTargets",
"score": 2
} |
#### File: DirectDmTargets/tests/test_plots.py
```python
import tempfile
import DirectDmTargets as dddm
import matplotlib.pyplot as plt
import numpy as np
from hypothesis import given, strategies
def test_ll_s():
dddm.plot_basics.plt_ll_sigma_spec(bins=2)
plt.clf()
plt.close()
def test_ll_m():
dddm.plot_basics.plt_ll_mass_spec(bins=3)
plt.clf()
plt.close()
def test_plt_b():
dddm.plot_basics.plt_priors(itot=10)
plt.clf()
plt.close()
def test_ll_function():
dddm.plot_basics.show_ll_function(20)
plt.clf()
plt.close()
def test_simple_hist():
dddm.plot_basics.simple_hist(np.linspace(0, 3, 3))
with tempfile.TemporaryDirectory() as tmpdirname:
dddm.plot_basics.save_canvas('test', save_dir=tmpdirname)
plt.clf()
plt.close()
@given(strategies.floats(0, 2),
)
def test_get_color(a):
dddm.plot_basics.get_color_from_range(a, _range=(0, max(1, a)))
``` |
{
"source": "JoranAngevaare/dddm",
"score": 2
} |
#### File: dddm/dddm/context.py
```python
import inspect
import os
import warnings
from socket import getfqdn
import pandas as pd
from immutabledict import immutabledict
import typing as ty
import dddm
import numpy as np
export, __all__ = dddm.exporter()
__all__ += ['log']
context = {}
log = dddm.utils.get_logger('dddm')
_naive_tmp = '/tmp/'
_host = getfqdn()
base_detectors = [
dddm.detectors.examples.XenonSimple,
dddm.detectors.examples.ArgonSimple,
dddm.detectors.examples.GermaniumSimple,
dddm.detectors.xenon_nt.XenonNtNr,
dddm.detectors.xenon_nt.XenonNtMigdal,
dddm.detectors.super_cdms.SuperCdmsHvGeNr,
dddm.detectors.super_cdms.SuperCdmsHvSiNr,
dddm.detectors.super_cdms.SuperCdmsIzipGeNr,
dddm.detectors.super_cdms.SuperCdmsIzipSiNr,
dddm.detectors.super_cdms.SuperCdmsHvGeMigdal,
dddm.detectors.super_cdms.SuperCdmsHvSiMigdal,
dddm.detectors.super_cdms.SuperCdmsIzipGeMigdal,
dddm.detectors.super_cdms.SuperCdmsIzipSiMigdal,
]
class Context:
"""Centralized object for managing:
- configurations
- files
- detector objects
"""
_directories = None
_detector_registry = None
_samplers = immutabledict({
'nestle': dddm.samplers.nestle.NestleSampler,
'multinest': dddm.samplers.pymultinest.MultiNestSampler,
'emcee': dddm.samplers.emcee.MCMCStatModel,
'multinest_combined': dddm.samplers.multi_detectors.CombinedMultinest,
'nestle_combined': dddm.samplers.multi_detectors.CombinedNestle,
})
_halo_classes = immutabledict({
'shm': dddm.SHM,
'shielded_shm': dddm.ShieldedSHM,
})
def register(self, detector: dddm.Experiment):
"""Register a detector to the context"""
if self._detector_registry is None:
self._detector_registry = {}
existing_detector = self._detector_registry.get(detector.detector_name)
if existing_detector is not None:
log.warning(f'replacing {existing_detector} with {detector}')
self._check_detector_is_valid(detector)
self._detector_registry[detector.detector_name] = detector
def set_paths(self, paths: dict, tolerant=False):
if self._directories is None:
self._directories = {}
for reference, path in paths.items():
if not os.path.exists(path):
try:
os.mkdir(path)
except Exception as e:
if tolerant:
                        warnings.warn(f'Could not find or create {path} for {reference}', UserWarning)
                    else:
                        raise FileNotFoundError(
                            f'Could not find or create {path} for {reference}'
                        ) from e
result = {**self._directories.copy(), **paths}
self._directories = result
def show_folders(self):
result = {'name': list(self._directories.keys())}
result['path'] = [self._directories[name] for name in result['name']]
result['exists'] = [os.path.exists(p) for p in result['path']]
result['n_files'] = [(len(os.listdir(p)) if os.path.exists(p) else 0) for p in
result['path']]
return pd.DataFrame(result)
def get_detector(self, detector: str, **kwargs):
if detector not in self._detector_registry:
raise NotImplementedError(f'{detector} not in {self.detectors}')
return self._detector_registry[detector](**kwargs)
def get_sampler_for_detector(self,
wimp_mass,
cross_section,
sampler_name: str,
detector_name: ty.Union[str, list, tuple],
prior: ty.Union[str, dict],
halo_name='shm',
detector_kwargs: dict = None,
halo_kwargs: dict = None,
sampler_kwargs: dict = None,
fit_parameters=dddm.statistics.get_param_list(),
):
self._check_sampler_args(wimp_mass, cross_section, sampler_name, detector_name, prior,
halo_name, detector_kwargs, halo_kwargs, sampler_kwargs,
fit_parameters)
sampler_class = self._samplers[sampler_name]
# If any class needs any of the paths, provide those here.
sampler_kwargs = self._add_folders_to_kwargs(sampler_class, sampler_kwargs)
halo_kwargs = self._add_folders_to_kwargs(
self._halo_classes.get(halo_name), halo_kwargs)
halo_model = self._halo_classes[halo_name](**halo_kwargs)
        # TODO: create one combined super-detector instead of several smaller ones
if isinstance(detector_name, (list, tuple)):
if not sampler_class.allow_multiple_detectors:
raise NotImplementedError(f'{sampler_class} does not allow multiple detectors')
detector_instance = [
self.get_detector(
det,
**self._add_folders_to_kwargs(self._detector_registry.get(det),
detector_kwargs)
)
for det in detector_name]
if halo_name == 'shielded_shm':
if len(locations := {d.location for d in detector_instance}) > 1:
raise ValueError(
f'Running with multiple locations for shielded_shm is not allowed. Got {locations}')
halo_kwargs.setdefault('log_mass', np.log10(wimp_mass))
halo_kwargs.setdefault('log_cross_section', np.log10(cross_section))
halo_kwargs.setdefault('location', list(locations)[0])
spectrum_instance = [dddm.DetectorSpectrum(
experiment=d, dark_matter_model=halo_model)
for d in detector_instance]
else:
detector_kwargs = self._add_folders_to_kwargs(
self._detector_registry.get(detector_name), detector_kwargs)
detector_instance = self.get_detector(detector_name, **detector_kwargs)
spectrum_instance = dddm.DetectorSpectrum(experiment=detector_instance,
dark_matter_model=halo_model)
if isinstance(prior, str):
prior = dddm.get_priors(prior)
return sampler_class(wimp_mass=wimp_mass,
cross_section=cross_section,
spectrum_class=spectrum_instance,
prior=prior,
fit_parameters=fit_parameters,
**sampler_kwargs
)
def _check_sampler_args(self,
wimp_mass,
cross_section,
sampler_name: str,
detector_name: ty.Union[str, list, tuple],
prior: ty.Union[str, dict],
halo_name='shm',
detector_kwargs: dict = None,
halo_kwargs: dict = None,
sampler_kwargs: dict = None,
fit_parameters=dddm.statistics.get_param_list(),
):
for det in dddm.utils.to_str_tuple(detector_name):
assert det in self._detector_registry, f'{det} is unknown'
assert wimp_mass < 200 and wimp_mass > 0.001, f'{wimp_mass} invalid'
assert np.log10(cross_section) < -20 and np.log10(
cross_section) > -60, f'{cross_section} invalid'
assert sampler_name in self._samplers, f'choose from {self._samplers}, got {sampler_name}'
assert isinstance(prior, (str, dict, immutabledict)), f'invalid {prior}'
assert halo_name in self._halo_classes, f'invalid {halo_name}'
def _add_folders_to_kwargs(self, function, current_kwargs: ty.Union[None, dict]) -> dict:
if function is None:
            return {}
if current_kwargs is None:
current_kwargs = {}
takes = inspect.getfullargspec(function).args
for directory, path in self._directories.items():
if directory in takes:
current_kwargs.update({directory: path})
return current_kwargs
@property
def detectors(self):
return sorted(list(self._detector_registry.keys()))
@staticmethod
def _check_detector_is_valid(detector: dddm.Experiment):
detector()._check_class()
@export
def base_context():
context = Context()
installation_folder = dddm.__path__[0]
default_context = {
'software_dir': installation_folder,
'results_dir': os.path.join(installation_folder, 'DD_DM_targets_data'),
'spectra_files': os.path.join(installation_folder, 'DD_DM_targets_spectra'),
'verne_folder': _get_verne_folder(),
'verne_files': _get_verne_folder(),
'tmp_folder': get_temp(),
}
context.set_paths(default_context)
for detector in base_detectors:
context.register(detector)
return context
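# Hedged usage sketch (added for illustration, not part of the original
# module): build the default context and ask it for a sampler. The prior name
# below is one of the sets defined in dddm.get_priors(); the detector is taken
# from ctx.detectors rather than guessed.
# >>> ctx = base_context()
# >>> ctx.detectors                        # sorted registered detector names
# >>> sampler = ctx.get_sampler_for_detector(
# ...     wimp_mass=50, cross_section=1e-45,
# ...     sampler_name='multinest', detector_name=ctx.detectors[0],
# ...     prior='Pato_2010')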
def _get_verne_folder():
if not dddm.utils.is_installed('verne'):
return './verne'
import verne
return os.path.join(os.path.split(verne.__path__[0])[0], 'results')
def get_temp():
if 'TMPDIR' in os.environ and os.access(os.environ['TMPDIR'], os.W_OK):
tmp_folder = os.environ['TMPDIR']
elif 'TMP' in os.environ and os.access(os.environ['TMP'], os.W_OK):
tmp_folder = os.environ['TMP']
elif os.path.exists(_naive_tmp) and os.access(_naive_tmp, os.W_OK):
tmp_folder = _naive_tmp
else:
raise FileNotFoundError('No temp folder available')
return tmp_folder
def open_save_dir(save_as, base_dir=None, force_index=False, _hash=None):
"""
:param save_as: requested name of folder to open in the result folder
:param base_dir: folder where the save_as dir is to be saved in.
This is the results folder by default
    :param force_index: option to force writing to a specific numbered folder
    (must be an explicit override!)
    :param _hash: add a hash to the save_as dir to avoid duplicate naming
    conventions while running multiple jobs
    :return: the name of the folder that could be saved to (usually the input +
    some number)
"""
if base_dir is None:
raise ValueError(save_as, base_dir, force_index, _hash)
if force_index:
results_path = os.path.join(base_dir, save_as + str(force_index))
elif _hash is None:
if force_index is not False:
raise ValueError(
f'do not set _hash to {_hash} and force_index to '
f'{force_index} simultaneously'
)
results_path = dddm.utils._folders_plus_one(base_dir, save_as)
else:
results_path = os.path.join(base_dir, save_as + '_HASH' + str(_hash))
dddm.utils.check_folder_for_file(os.path.join(results_path, "some_file_goes_here"))
log.info('open_save_dir::\tusing ' + results_path)
return results_path
```
#### File: dddm/detectors/super_cdms.py
```python
import typing as ty
from abc import ABC
from .experiment import Experiment, lindhard_quenching_factor, _get_nr_resolution
import numpy as np
import dddm
from functools import partial
export, __all__ = dddm.exporter()
class _BaseSuperCdms(Experiment, ABC):
"""Base class of superCDMS to introduce shared properties"""
location = "SNOLAB"
# Parameters needed for eq. 3, 4 of https://arxiv.org/pdf/1610.00006.pdf
# Since they are not directly used, they are not set as class attributes
_energy_parameters = dict(
si_hv={'Z': 14,
'k': 0.161,
'epsilon': 0.003,
'e_delta_v': 0.1,
'e_thr_phonon': 100e-3,
'sigma_phonon': 5e-3,
'sigma_ion': np.nan, # Only phonons
},
si_izip={'Z': 14,
'k': 0.161,
'epsilon': 0.003,
'e_delta_v': 0.008,
'e_thr_phonon': 175e-3,
'sigma_phonon': 25e-3,
'sigma_ion': 110e-3,
},
ge_hv={'Z': 32,
'k': 0.162,
'epsilon': 0.00382,
'e_delta_v': 0.1,
'e_thr_phonon': 100e-3,
'sigma_phonon': 10e-3,
'sigma_ion': np.nan, # Only phonons
},
ge_izip={'Z': 32,
'k': 0.162,
'epsilon': 0.00382,
'e_delta_v': 0.006,
'e_thr_phonon': 350e-3,
'sigma_phonon': 50e-3,
'sigma_ion': 100e-3,
},
)
def get_energy_thr_ee_from_phonon_thr(self) -> ty.Union[float, int]:
"""get the energy threshold (ee) based on the energy_parameters"""
assert self.interaction_type == 'migdal_SI'
this_conf = self._energy_parameters[self.detector_key]
return energy_ee_from_energy_phonon(
e_ph=this_conf['e_thr_phonon'],
e_delta_v=this_conf['e_delta_v'],
epsilon=this_conf['epsilon']
)
def get_energy_res_ee_from_phonon_res(self) -> ty.Union[float, int]:
"""get the energy resolution (ee) based on the energy_parameters"""
assert self.interaction_type == 'migdal_SI'
this_conf = self._energy_parameters[self.detector_key]
return energy_ee_from_energy_phonon(
e_ph=this_conf['sigma_phonon'],
e_delta_v=this_conf['e_delta_v'],
epsilon=this_conf['epsilon']
)
def energy_nr_to_detectable_energy_function(self) -> ty.Callable:
"""
Get phonon energy (hv) or ionization energy (izip) from nuclear recoil energy
"""
assert self.interaction_type == 'SI'
det_key = self.detector_key
this_conf = self._energy_parameters[det_key]
if 'izip' in det_key:
return partial(energy_ionization_from_e_nr,
Z=this_conf['Z'],
k=this_conf['k'],
)
if 'hv' in det_key:
return partial(energy_phonon_from_energy_nr,
Z=this_conf['Z'],
k=this_conf['k'],
e_delta_v=this_conf['e_delta_v'],
epsilon=this_conf['epsilon'],
)
raise ValueError(f'got {det_key}?!')
@property
def detector_key(self) -> str:
material = self.target_material.lower()
if 'hv' in self.detector_name.lower():
return f'{material}_hv'
assert 'izip' in self.detector_name.lower()
return f'{material}_izip'
@export
class SuperCdmsHvGeNr(_BaseSuperCdms):
detector_name = 'SuperCDMS_HV_Ge_NR'
target_material = 'Ge'
interaction_type = 'SI'
__version__ = '0.0.0'
exposure_tonne_year = 44 * 1.e-3 # Tonne year
energy_threshold_kev = 40. / 1e3 # table VIII, Enr
cut_efficiency = 0.85 # p. 11, right column
detection_efficiency = 0.85 # p. 11, left column NOTE: ER type!
def resolution(self, energies_in_kev):
"""Flat resolution"""
phonon_energy_from_nr = self.energy_nr_to_detectable_energy_function()
phonon_resolution = self._energy_parameters[self.detector_key]['sigma_phonon']
return _get_nr_resolution(energies_in_kev, phonon_energy_from_nr, phonon_resolution)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 27 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsHvSiNr(_BaseSuperCdms):
detector_name = 'SuperCDMS_HV_Si_NR'
target_material = 'Si'
interaction_type = 'SI'
__version__ = '0.0.0'
exposure_tonne_year = 9.6 * 1.e-3 # Tonne year
energy_threshold_kev = 78. / 1e3 # table VIII, Enr
cut_efficiency = 0.85 # p. 11, right column
detection_efficiency = 0.85 # p. 11, left column NOTE: ER type!
def resolution(self, energies_in_kev):
"""Flat resolution"""
phonon_energy_from_nr = self.energy_nr_to_detectable_energy_function()
phonon_resolution = self._energy_parameters[self.detector_key]['sigma_phonon']
return _get_nr_resolution(energies_in_kev, phonon_energy_from_nr, phonon_resolution)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 300 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsIzipGeNr(_BaseSuperCdms):
detector_name = 'SuperCDMS_iZIP_Ge_NR'
target_material = 'Ge'
interaction_type = 'SI'
__version__ = '0.0.0'
exposure_tonne_year = 56 * 1.e-3 # Tonne year
energy_threshold_kev = 272. / 1e3 # table VIII, Enr
cut_efficiency = 0.75 # p. 11, right column
detection_efficiency = 0.85 # p. 11, left column
def resolution(self, energies_in_kev):
"""Flat resolution"""
ionization_energy_from_nr = self.energy_nr_to_detectable_energy_function()
ionization_resolution = self._energy_parameters[self.detector_key]['sigma_ion']
return _get_nr_resolution(energies_in_kev, ionization_energy_from_nr, ionization_resolution)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 3300e-6 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsIzipSiNr(_BaseSuperCdms):
detector_name = 'SuperCDMS_iZIP_Si_NR'
target_material = 'Si'
interaction_type = 'SI'
__version__ = '0.0.0'
exposure_tonne_year = 4.8 * 1.e-3 # Tonne year
energy_threshold_kev = 166. / 1e3 # table VIII, Enr
cut_efficiency = 0.75 # p. 11, right column
detection_efficiency = 0.85 # p. 11, left column
def resolution(self, energies_in_kev):
"""Flat resolution"""
ionization_energy_from_nr = self.energy_nr_to_detectable_energy_function()
ionization_resolution = self._energy_parameters[self.detector_key]['sigma_ion']
return _get_nr_resolution(energies_in_kev, ionization_energy_from_nr, ionization_resolution)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 2900e-6 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsHvGeMigdal(_BaseSuperCdms):
detector_name = 'SuperCDMS_HV_Ge_Migdal'
target_material = 'Ge'
interaction_type = 'migdal_SI'
__version__ = '0.0.0'
exposure_tonne_year = 44 * 1.e-3 # Tonne year
cut_efficiency = 0.85 # p. 11, right column
detection_efficiency = 0.5 # p. 11, left column NOTE: migdal is ER type!
@property
def energy_threshold_kev(self):
return self.get_energy_thr_ee_from_phonon_thr()
def resolution(self, energies_in_kev):
"""Flat resolution"""
e_res_ee = self.get_energy_res_ee_from_phonon_res()
return self._flat_resolution(len(energies_in_kev), e_res_ee)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 27 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsHvSiMigdal(_BaseSuperCdms):
detector_name = 'SuperCDMS_HV_Si_Migdal'
target_material = 'Si'
interaction_type = 'migdal_SI'
__version__ = '0.0.0'
exposure_tonne_year = 9.6 * 1.e-3 # Tonne year
cut_efficiency = 0.85 # p. 11, right column
detection_efficiency = 0.675 # p. 11, left column NOTE: migdal is ER type!
@property
def energy_threshold_kev(self):
return self.get_energy_thr_ee_from_phonon_thr()
def resolution(self, energies_in_kev):
"""Flat resolution"""
e_res_ee = self.get_energy_res_ee_from_phonon_res()
return self._flat_resolution(len(energies_in_kev), e_res_ee)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 300 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsIzipGeMigdal(_BaseSuperCdms):
detector_name = 'SuperCDMS_iZIP_Ge_Migdal'
target_material = 'Ge'
interaction_type = 'migdal_SI'
__version__ = '0.0.0'
exposure_tonne_year = 56 * 1.e-3 # Tonne year
cut_efficiency = 0.75 # p. 11, right column
detection_efficiency = 0.5 # p. 11, left column NOTE: migdal is ER type!
@property
def energy_threshold_kev(self):
return self.get_energy_thr_ee_from_phonon_thr()
def resolution(self, energies_in_kev):
"""Flat resolution"""
e_res_ee = self.get_energy_res_ee_from_phonon_res()
return self._flat_resolution(len(energies_in_kev), e_res_ee)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 22 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
@export
class SuperCdmsIzipSiMigdal(_BaseSuperCdms):
detector_name = 'SuperCDMS_iZIP_Si_Migdal'
target_material = 'Si'
interaction_type = 'migdal_SI'
__version__ = '0.0.0'
exposure_tonne_year = 4.8 * 1.e-3 # Tonne year
cut_efficiency = 0.75 # p. 11, right column
detection_efficiency = 0.675 # p. 11, left column NOTE: migdal is ER type!
@property
def energy_threshold_kev(self):
return self.get_energy_thr_ee_from_phonon_thr()
def resolution(self, energies_in_kev):
"""Flat resolution"""
e_res_ee = self.get_energy_res_ee_from_phonon_res()
return self._flat_resolution(len(energies_in_kev), e_res_ee)
def background_function(self, energies_in_kev):
"""Flat bg rate"""
bg_rate_nr = 370 # counts/kg/keV/year
conv_units = 1.0e3 # Tonne
return self._flat_background(len(energies_in_kev), bg_rate_nr * conv_units)
def energy_ee_from_energy_phonon(e_ph, e_delta_v, epsilon):
"""Eq. 4 in https://arxiv.org/abs/1610.00006 rewritten to ee
(`y`=1) and `eta`=1"""
return e_ph / (1 + e_delta_v / epsilon)
def energy_phonon_from_energy_nr(e_r_nr, Z, k, e_delta_v, epsilon):
y = lindhard_quenching_factor(e_r_nr, atomic_number_z=Z, k=k)
if not isinstance(y, np.ndarray):
raise ValueError
return e_r_nr * (1 + y * (e_delta_v / epsilon))
def energy_ionization_from_e_nr(e_r_nr, Z, k):
y = lindhard_quenching_factor(e_r_nr, atomic_number_z=Z, k=k)
if not isinstance(y, np.ndarray):
raise ValueError
return e_r_nr * y
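# Hedged usage sketch (added): converting a nuclear-recoil energy to the
# detectable phonon energy with the Ge HV parameters tabulated above. The
# input value is illustrative only.
# >>> e_nr = np.array([1.0])  # keVnr
# >>> energy_phonon_from_energy_nr(e_nr, Z=32, k=0.162,
# ...                              e_delta_v=0.1, epsilon=0.00382)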
```
#### File: dddm/dddm/priors.py
```python
from immutabledict import immutabledict
import dddm
import numpy as np
export, __all__ = dddm.exporter()
@export
def get_priors(priors_from="Evans_2019"):
"""
:return: dictionary of priors, type and values
"""
if priors_from == "Pato_2010":
priors = {'log_mass': {'range': [0.1, 3], 'prior_type': 'flat'},
'log_cross_section': {'range': [-46, -42], 'prior_type': 'flat'},
'density': {'range': [0.001, 0.9], 'prior_type': 'gauss', 'mean': 0.4,
'std': 0.1},
'v_0': {'range': [80, 380], 'prior_type': 'gauss', 'mean': 230, 'std': 30},
'v_esc': {'range': [379, 709], 'prior_type': 'gauss', 'mean': 544, 'std': 33},
'k': {'range': [0.5, 3.5], 'prior_type': 'flat'}}
elif priors_from == "Evans_2019":
# https://arxiv.org/abs/1901.02016
priors = {'log_mass': {'range': [0.1, 3], 'prior_type': 'flat'},
'log_cross_section': {'range': [-46, -42], 'prior_type': 'flat'},
'density': {'range': [0.001, 0.9], 'prior_type': 'gauss', 'mean': 0.55,
'std': 0.17},
'v_0': {'range': [80, 380], 'prior_type': 'gauss', 'mean': 233, 'std': 3},
'v_esc': {'range': [379, 709], 'prior_type': 'gauss', 'mean': 528, 'std': 24.5}}
elif priors_from == "migdal_wide":
priors = {'log_mass': {'range': [-1.5, 1.5], 'prior_type': 'flat'},
'log_cross_section': {'range': [-48, -37], 'prior_type': 'flat'},
# see Evans_2019_constraint
'density': {'range': [0.001, 0.9], 'prior_type': 'gauss', 'mean': 0.55,
'std': 0.17},
'v_0': {'range': [80, 380], 'prior_type': 'gauss', 'mean': 233, 'std': 20},
'v_esc': {'range': [379, 709], 'prior_type': 'gauss', 'mean': 528, 'std': 24.5},
'k': {'range': [0.5, 3.5], 'prior_type': 'flat'}}
elif priors_from == "low_mass":
priors = {'log_mass': {'range': [-1.5, 1.5], 'prior_type': 'flat'},
'log_cross_section': {'range': [-48, -37], 'prior_type': 'flat'},
# see Evans_2019_constraint
'density': {'range': [0.0001, 1], 'prior_type': 'gauss', 'mean': 0.55,
'std': 0.17},
'v_0': {'range': [133, 333], 'prior_type': 'gauss', 'mean': 233, 'std': 20},
'v_esc': {'range': [405.5, 650.5], 'prior_type': 'gauss', 'mean': 528,
'std': 24.5}}
elif priors_from == "low_mass_fixed":
priors = {'log_mass': {'range': [-2, 2], 'prior_type': 'flat'},
'log_cross_section': {'range': [-53, -27], 'prior_type': 'flat'},
# see Evans_2019_constraint
'density': {'range': [0.0001, 1], 'prior_type': 'gauss', 'mean': 0.55,
'std': 0.17},
'v_0': {'range': [133, 333], 'prior_type': 'gauss', 'mean': 233, 'std': 20},
'v_esc': {'range': [405.5, 650.5], 'prior_type': 'gauss', 'mean': 528,
'std': 24.5}}
elif priors_from == "migdal_extremely_wide":
priors = {'log_mass': {'range': [-2, 3], 'prior_type': 'flat'},
'log_cross_section': {'range': [-50, -30], 'prior_type': 'flat'},
'density': {'range': [0.001, 0.9], 'prior_type': 'gauss', 'mean': 0.55,
'std': 0.5},
'v_0': {'range': [80, 380], 'prior_type': 'gauss', 'mean': 233, 'std': 90},
'v_esc': {'range': [379, 709], 'prior_type': 'gauss', 'mean': 528, 'std': 99},
'k': {'range': [0.5, 3.5], 'prior_type': 'flat'}
}
else:
raise NotImplementedError(
f"Taking priors from {priors_from} is not implemented")
for key in priors.keys():
param = priors[key]
if param['prior_type'] == 'flat':
param['param'] = param['range']
param['dist'] = flat_prior_distribution
elif param['prior_type'] == 'gauss':
param['param'] = param['mean'], param['std']
param['dist'] = gauss_prior_distribution
return immutabledict(priors)
def flat_prior_distribution(_range):
return np.random.uniform(_range[0], _range[1])
def gauss_prior_distribution(_param):
mu, sigma = _param
return np.random.normal(mu, sigma)
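# Hedged usage sketch (added): drawing one random sample from every prior in
# a set; the 'dist' and 'param' entries are attached by get_priors() above.
# >>> priors = get_priors("Evans_2019")
# >>> {name: p['dist'](p['param']) for name, p in priors.items()}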
```
#### File: dddm/recoil_rates/halo.py
```python
import numericalunits as nu
import wimprates as wr
import dddm
export, __all__ = dddm.exporter()
@export
class SHM:
"""
class used to pass a halo model to the rate computation
must contain:
:param v_esc -- escape velocity (multiplied by units)
:param rho_dm -- density in mass/volume of dark matter at the Earth (multiplied by units)
The standard halo model also allows variation of v_0
:param v_0 -- v0 of the velocity distribution (multiplied by units)
:function velocity_dist -- function taking v,t giving normalised
velocity distribution in earth rest-frame.
"""
def __init__(self, v_0=None, v_esc=None, rho_dm=None):
self.v_0 = 230 * nu.km / nu.s if v_0 is None else v_0
self.v_esc = 544 * nu.km / nu.s if v_esc is None else v_esc
self.rho_dm = (0.3 * nu.GeV / nu.c0 ** 2 / nu.cm ** 3
if rho_dm is None else rho_dm)
def __str__(self):
# Standard Halo Model (shm)
return 'shm'
def velocity_dist(self, v, t):
"""
Get the velocity distribution in units of per velocity,
:param v: v is in units of velocity
:return: observed velocity distribution at earth
"""
return wr.observed_speed_dist(v, t, self.v_0, self.v_esc)
def parameter_dict(self):
"""Return a dict of readable parameters of the current settings"""
return dict(
v_0=self.v_0 / (nu.km / nu.s),
v_esc=self.v_esc / (nu.km / nu.s),
rho_dm=self.rho_dm / (nu.GeV / nu.c0 ** 2 / nu.cm ** 3),
)
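# Hedged usage sketch (added): instantiating the SHM with explicit
# numericalunits and reading back the dimensionless parameters (the numbers
# are the defaults quoted in the class docstring).
# >>> shm = SHM(v_0=230 * nu.km / nu.s, v_esc=544 * nu.km / nu.s)
# >>> shm.parameter_dict()
# {'v_0': 230.0, 'v_esc': 544.0, 'rho_dm': 0.3}   (up to float rounding)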
```
#### File: dddm/recoil_rates/halo_shielded.py
```python
import os
import shutil
import numericalunits as nu
import pandas as pd
from dddm import utils, exporter
import warnings
from scipy.interpolate import interp1d
import numpy as np
export, __all__ = exporter()
@export
class ShieldedSHM:
"""
class used to pass a halo model to the rate computation based on the
earth shielding effect as calculated by Verne
must contain:
:param v_esc -- escape velocity (multiplied by units)
:param rho_dm -- density in mass/volume of dark matter at the Earth (multiplied by units)
The standard halo model also allows variation of v_0
:param v_0 -- v0 of the velocity distribution (multiplied by units)
:function velocity_dist -- function taking v,t giving normalised
velocity distribution in earth rest-frame.
"""
def __init__(self,
location,
file_folder='./verne_files',
v_0=None,
v_esc=None,
rho_dm=None,
log_cross_section=None,
log_mass=None,
):
v_0_nodim = 230 if v_0 is None else v_0 / (nu.km / nu.s)
v_esc_nodim = 544 if v_esc is None else v_esc / (nu.km / nu.s)
rho_dm_nodim = (0.3 if rho_dm is None else
rho_dm / (nu.GeV / nu.c0 ** 2 / nu.cm ** 3))
# Here we keep the units dimensionful as these parameters are requested
# by wimprates and therefore must have dimensions
self.v_0 = v_0_nodim * nu.km / nu.s
self.v_esc = v_esc_nodim * nu.km / nu.s
self.rho_dm = rho_dm_nodim * nu.GeV / nu.c0 ** 2 / nu.cm ** 3
assert np.isclose(self.v_0_nodim, v_0_nodim), (self.v_0_nodim, v_0_nodim)
assert np.isclose(self.v_esc_nodim, v_esc_nodim), (self.v_esc_nodim, v_esc_nodim)
assert np.isclose(self.rho_dm_nodim, rho_dm_nodim), (self.rho_dm_nodim, rho_dm_nodim)
# in contrast to the SHM, the earth shielding does need the mass and
# cross-section to calculate the rates.
self.log_cross_section = -35 if log_cross_section is None else log_cross_section
self.log_mass = 0 if log_mass is None else log_mass
self.location = "XENON" if location is None else location
# Combine the parameters into a single naming convention. This is were
# we will save/read the velocity distribution (from).
self.fname = os.path.join(
'f_params',
f'loc_{self.location}',
f'v0_{int(self.v_0_nodim)}',
f'vesc_{int(self.v_esc_nodim)}',
f'rho_{self.rho_dm_nodim:.3f}',
f'sig_{self.log_cross_section:.1f}_mx_{self.log_mass:.2f}',
)
self.itp_func = None
self.log = utils.get_logger(self.__class__.__name__)
self.file_folder = file_folder
def __str__(self):
# The standard halo model observed at some location shielded from strongly
# interacting DM by overburden (rock atmosphere)
return 'shielded_shm'
def load_f(self):
"""
        Load the velocity distribution. If there is no velocity
        distribution saved yet, compute and save one.
:return:
"""
import verne
# set up folders and names
file_folder = self.file_folder
file_name = os.path.join(file_folder, self.fname + '_avg' + '.csv')
utils.check_folder_for_file(os.path.join(file_folder, self.fname))
# Convert file_name and self.fname to folder and name of csv file where
# to save.
temp_file_name = utils.add_temp_to_csv(file_name)
exist_csv = os.path.exists(file_name)
        assertion_string = f'temp file {temp_file_name} should be a string\n'
        assertion_string += f'exists csv {exist_csv} should be a bool'
        self.log.info(f'load_f::\twrite to {temp_file_name} ({not exist_csv}). '
                      f'Then move to {file_name}')
assert (isinstance(temp_file_name, str) and
isinstance(exist_csv, bool)), assertion_string
if not exist_csv:
self.log.info(f'Using {file_name} for the velocity distribution. '
f'Writing to {temp_file_name}')
df = verne.CalcVelDist.avg_calcveldist(
m_x=10. ** self.log_mass,
sigma_p=10. ** self.log_cross_section,
loc=self.location,
v_esc=self.v_esc_nodim,
v_0=self.v_0_nodim,
N_gamma=4,
)
if not os.path.exists(file_name):
self.log.info(f'writing to {temp_file_name}')
df.to_csv(temp_file_name, index=False)
if not os.path.exists(file_name):
self.log.info(f'moving {temp_file_name} to {file_name}')
shutil.move(temp_file_name, file_name)
else:
self.log.warning(f'while writing {temp_file_name}, {file_name} was created')
else:
self.log.info(f'Using {file_name} for the velocity distribution')
try:
df = pd.read_csv(file_name)
except pd.io.common.EmptyDataError as pandas_error:
os.remove(file_name)
raise pandas_error
# Alright now load the data and interpolate that. This is the output
# that wimprates need
if not os.path.exists(os.path.abspath(file_name)):
            raise OSError(f'{file_name} should exist. Is there anything at {temp_file_name}?')
if not len(df):
# Somehow we got an empty dataframe, we cannot continue
os.remove(file_name)
raise ValueError(
f'Was trying to read an empty dataframe from {file_name}:\n{df}')
x, y = df.keys()
interpolation = interp1d(
df[x] * (nu.km / nu.s), df[y] * (nu.s / nu.km), bounds_error=False, fill_value=0)
def velocity_dist(v_, t_):
# Wimprates needs to have a two-parameter function. However since we
# ignore time for now. We make this makeshift transition from a one
# parameter function to a two parameter function
return interpolation(v_)
self.itp_func = velocity_dist
def velocity_dist(self, v, t):
"""
Get the velocity distribution in units of per velocity,
:param v: v is in units of velocity
:return: observed velocity distribution at earth
"""
if self.itp_func is None:
self.load_f()
return self.itp_func(v, t)
def parameter_dict(self):
"""Return a dict of readable parameters of the current settings"""
return dict(
v_0=self.v_0_nodim,
v_esc=self.v_esc_nodim,
rho_dm=self.rho_dm_nodim,
log_cross_section=self.log_cross_section,
log_mass=self.log_mass,
location=self.location,
)
@property
def v_0_nodim(self):
return self.v_0 / (nu.km / nu.s)
@property
def v_esc_nodim(self):
return self.v_esc / (nu.km / nu.s)
@property
def rho_dm_nodim(self):
return self.rho_dm / (nu.GeV / nu.c0 ** 2 / nu.cm ** 3)
class VerneSHM(ShieldedSHM):
def __init__(self, *args, **kwargs):
warnings.warn("Use ShieldedSHM instead of VerneSHM", DeprecationWarning)
super().__init__(*args, **kwargs)
```
#### File: dddm/samplers/pymultinest.py
```python
from __future__ import absolute_import, unicode_literals
import datetime
import json
import os
import shutil
import tempfile
from warnings import warn
import corner
import matplotlib.pyplot as plt
import numpy as np
from scipy import special as spsp
import dddm
import typing as ty
from immutabledict import immutabledict
export, __all__ = dddm.exporter()
@export
class MultiNestSampler(dddm.StatModel):
def __init__(self,
wimp_mass: ty.Union[float, int],
cross_section: ty.Union[float, int],
spectrum_class: ty.Union[dddm.DetectorSpectrum,
dddm.GenSpectrum],
prior: dict,
tmp_folder: str,
results_dir: str = None,
fit_parameters=('log_mass', 'log_cross_section', 'v_0', 'v_esc', 'density', 'k'),
detector_name=None,
verbose=False,
notes='default',
nlive=1024,
tol=0.1,
):
super().__init__(wimp_mass=wimp_mass,
cross_section=cross_section,
spectrum_class=spectrum_class,
prior=prior,
tmp_folder=tmp_folder,
fit_parameters=fit_parameters,
detector_name=detector_name,
verbose=verbose,
notes=notes,
)
self.results_dir = results_dir
self.config.update(
{'tol': tol, # Tolerance for sampling
'nlive': nlive, # number of live points
})
self.log_dict = {
'did_run': False,
'saved_in': None,
'tmp_dir': tmp_folder,
}
self.result = False
def check_did_run(self):
if not self.log_dict['did_run']:
self.log.info('did not run yet, lets fire it up!')
self.run()
else:
self.log.info('did run')
def check_did_save(self):
        if self.log_dict['saved_in'] is None:
            self.log.info(
                "did not save yet, we don't want to lose our results "
                "so better do it now")
            self.save_results()
def log_probability_nested(self, parameter_vals, parameter_names):
"""
:param parameter_vals: the values of the model/benchmark considered as the truth
# :param parameter_values: the values of the parameters that are being varied
:param parameter_names: the names of the parameter_values
:return:
"""
self.log.debug('there we go! Find that log probability')
evaluated_rate = self.eval_spectrum(parameter_vals, parameter_names)
ll = dddm.statistics.log_likelihood(self.benchmark_values, evaluated_rate)
if np.isnan(ll):
raise ValueError(f"Returned NaN from likelihood. ll = {ll}")
self.log.debug('found it! returning the log likelihood')
return ll
def log_prior_transform_nested(self, x, x_name):
self.log.debug(
'doing some transformations for nestle/multinest to read the priors'
)
this_prior = self.config['prior'][x_name]
prior_type = this_prior['prior_type']
if prior_type == 'flat':
a, b = this_prior['param']
# Prior transform of a flat prior is a simple line.
return x * (b - a) + a
if prior_type == 'gauss':
# Get the range from the config file
a, b = this_prior['range']
m, s = this_prior['param']
            # Here the prior transform is being constructed and shifted. This
            # may not seem trivial; see the illustrative note below, or ask
            # the developer(s) for the notebook in which it is derived.
aprime = spsp.ndtr((a - m) / s)
bprime = spsp.ndtr((b - m) / s)
xprime = x * (bprime - aprime) + aprime
return m + s * spsp.ndtri(xprime)
raise ValueError(f"unknown prior type '{prior_type}'")
def _log_probability_nested(self, theta):
"""warp log_prior_transform_nested"""
ndim = len(theta)
return self.log_probability_nested(
theta, self.known_parameters[:ndim])
def _log_prior_transform_nested(self, theta):
result = [
self.log_prior_transform_nested(val, self.known_parameters[i])
for i, val in enumerate(theta)]
return np.array(result)
def _print_before_run(self):
self.log.warning(
f"""
--------------------------------------------------
{dddm.utils.now()}\n\tFinal print of all of the set options:
self.log = {self.log}
self.result = {self.result}
self.benchmark_values = {np.array(self.benchmark_values)}
self.config = {self.config}
--------------------------------------------------
"""
)
def run(self):
self._fix_parameters()
self._print_before_run()
try:
from pymultinest.solve import run, Analyzer, solve
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
'package pymultinest not found. See README') from e
n_dims = len(self.config["fit_parameters"])
tol = self.config['tol'] # the stopping criterion
save_at = self.get_save_dir()
self.log.warning(f'start_fit for {n_dims} parameters')
start = datetime.datetime.now()
# Multinest saves output to a folder. First write to the tmp folder,
# move it to the results folder later
_tmp_folder = self.get_save_dir()
save_at_temp = os.path.join(_tmp_folder, 'multinest')
solve_multinest(
LogLikelihood=self._log_probability_nested, # SafeLoglikelihood,
Prior=self._log_prior_transform_nested, # SafePrior,
n_live_points=self.config['nlive'],
n_dims=n_dims,
outputfiles_basename=save_at_temp,
verbose=True,
evidence_tolerance=tol,
# null_log_evidence=dddm.statistics.LL_LOW_BOUND,
max_iter=self.config.get('max_iter', 0),
)
self.result_file = save_at_temp
# Open a save-folder after successful running multinest. Move the
# multinest results there.
dddm.utils.check_folder_for_file(save_at)
end = datetime.datetime.now()
dt = (end - start).total_seconds()
self.log.info(f'fit_done in {dt} s ({dt / 3600} h)')
self.log_dict['did_run'] = True
# release the config
self.config = dddm.utils._immutable_to_dict(self.config)
self.config['fit_time'] = dt
self.log.info('Finished with running Multinest!')
def get_summary(self):
self.log.info(
"getting the summary (or at least trying) let's first see if I did run"
)
self.check_did_run()
# keep a dictionary of all the results
resdict = {}
# Do the import of multinest inside the class such that the package can be
# loaded without multinest
try:
from pymultinest.solve import run, Analyzer, solve
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                'package pymultinest not found. See README for installation') from e
self.log.info('start analyzer of results')
analyzer = Analyzer(len(self.config['fit_parameters']),
outputfiles_basename=self.result_file)
# Taken from multinest.solve
self.result = analyzer.get_stats()
samples = analyzer.get_equal_weighted_posterior()[:, :-1]
self.log.info('parameter values:')
for name, col in zip(self.config['fit_parameters'],
samples.transpose()):
self.log.info(
'%15s : %.3f +- %.3f' %
(name, col.mean(), col.std()))
resdict[name + '_fit_res'] = (
'{0:5.2f} +/- {1:5.2f}'.format(col.mean(), col.std()))
if 'log_' in name:
resdict[name[4:] + '_fit_res'] = '%.3g +/- %.2g' % (
10. ** col.mean(), 10. ** (col.mean()) * np.log(10.) * col.std())
self.log.info(f'\t {name[4:]},'
f' {resdict[name[4:] + "_fit_res"]}')
resdict['best_fit'] = np.mean(samples.transpose(), axis=1)
print(resdict['best_fit'])
resdict['cov_matrix'] = np.cov(samples.transpose())
print(resdict['cov_matrix'])
resdict['n_samples'] = len(samples.transpose()[0])
# Pass the samples to the self.result to be saved.
self.result['samples'] = samples
self.log.info('Alright we got all the info we need')
return resdict
def get_save_dir(self, force_index=False, _hash=None) -> str:
saved_in = self.log_dict['saved_in']
saved_ok = isinstance(saved_in, str) and os.path.exists(saved_in)
if saved_ok and not force_index:
return saved_in
target_save = dddm.context.open_save_dir(
f'nes_{self.__class__.__name__[:3]}',
base_dir=self.results_dir,
force_index=force_index,
_hash=_hash)
self.log_dict['saved_in'] = target_save
self.log.info(f'get_save_dir\tsave_dir = {target_save}')
return target_save
def save_results(self, force_index=False):
self.log.info('Saving results after checking we did run')
# save fit parameters to config
self.check_did_run()
save_dir = self.get_save_dir(force_index=force_index)
fit_summary = self.get_summary()
self.log.info(f'storing in {save_dir}')
# save the config, chain and flattened chain
pid_id = 'pid' + str(os.getpid()) + '_'
with open(os.path.join(save_dir, f'{pid_id}config.json'), 'w') as file:
json.dump(convert_dic_to_savable(self.config), file, indent=4)
with open(os.path.join(save_dir, f'{pid_id}res_dict.json'), 'w') as file:
json.dump(convert_dic_to_savable(fit_summary), file, indent=4)
np.save(
os.path.join(save_dir, f'{pid_id}config.npy'),
convert_dic_to_savable(self.config))
np.save(os.path.join(save_dir, f'{pid_id}res_dict.npy'),
convert_dic_to_savable(fit_summary))
        for col in self.result.keys():
            if col == 'samples' or not isinstance(self.result[col], dict):
if col == 'samples':
# in contrast to nestle, multinest returns the weighted
# samples.
store_at = os.path.join(save_dir,
f'{pid_id}weighted_samples.npy')
else:
store_at = os.path.join(
save_dir,
pid_id + col + '.npy')
np.save(store_at, self.result[col])
else:
np.save(os.path.join(save_dir, pid_id + col + '.npy'),
convert_dic_to_savable(self.result[col]))
if 'logging' in self.config:
store_at = os.path.join(save_dir,
self.config['logging'].split('/')[-1])
shutil.copy(self.config['logging'], store_at)
self.log.info('save_results::\tdone_saving')
def show_corner(self):
self.check_did_save()
save_dir = self.log_dict['saved_in']
combined_results = load_multinest_samples_from_file(save_dir)
multinest_corner(combined_results, save_dir)
self.log.info('Enjoy the plot. Maybe you do want to save it too?')
def convert_dic_to_savable(config):
result = config.copy()
if isinstance(config, immutabledict):
result = dict(config.items())
for key, value in result.items():
if dddm.utils.is_savable_type(value):
continue
if isinstance(value, (dict, immutabledict)):
result[key] = convert_dic_to_savable(result[key])
elif isinstance(value, np.ndarray):
result[key] = value.tolist()
elif isinstance(value, np.integer):
result[key] = int(value)
elif isinstance(value, np.floating):
result[key] = float(value)
else:
result[key] = str(result[key])
return result
def load_multinest_samples_from_file(load_dir):
keys = os.listdir(load_dir)
keys = [key for key in keys if os.path.isfile(os.path.join(load_dir, key))]
result = {}
for key in keys:
if '.npy' in key:
naked_key = key.split('.npy')[0]
naked_key = do_strip_from_pid(naked_key)
tmp_res = np.load(os.path.join(load_dir, key), allow_pickle=True)
if naked_key in ['config', 'res_dict']:
result[naked_key] = tmp_res.item()
else:
result[naked_key] = tmp_res
return result
def do_strip_from_pid(string):
"""
remove PID identifier from a string
"""
if 'pid' not in string:
return string
new_key = string.split("_")
new_key = "_".join(new_key[1:])
return new_key
def _get_info(result, _result_key):
info = r"$M_\chi}$=%.2f" % 10. ** np.float64(result['config']['log_mass'])
for prior_key in result['config']['prior'].keys():
if (prior_key in result['config']['prior'] and
'mean' in result['config']['prior'][prior_key]):
mean = result['config']['prior'][prior_key]['mean']
info += f"\n{prior_key} = {mean}"
nposterior, ndim = np.shape(result[_result_key])
info += "\nnposterior = %s" % nposterior
for str_inf in ['detector', 'notes', 'start', 'fit_time', 'poisson',
'n_energy_bins']:
if str_inf in result['config']:
info += f"\n{str_inf} = %s" % result['config'][str_inf]
if str_inf == 'start':
info = info[:-7]
if str_inf == 'fit_time':
info += 's (%.1f h)' % (float(result['config'][str_inf]) / 3600.)
return info, ndim
def multinest_corner(
result,
save=False,
_result_key='weighted_samples',
_weights=False):
info, ndim = _get_info(result, _result_key)
labels = dddm.statistics.get_param_list()[:ndim]
truths = []
for prior_name in dddm.statistics.get_prior_list()[:ndim]:
if prior_name == "rho_0":
prior_name = 'density'
if prior_name in result['config']:
truths.append(result['config'][prior_name])
else:
truths.append(result['config']['prior'][prior_name]['mean'])
weight_kwargs = dict(weights=result['weights']) if _weights else {}
fig = corner.corner(
result[_result_key],
**weight_kwargs,
labels=labels,
range=[0.99999, 0.99999, 0.99999, 0.99999, 0.99999][:ndim],
truths=truths,
show_titles=True)
fig.axes[1].set_title('Fit title', loc='left')
fig.axes[1].text(0, 1, info, verticalalignment='top')
if save:
plt.savefig(f"{save}corner.png", dpi=200)
def solve_multinest(LogLikelihood, Prior, n_dims, **kwargs):
"""
See PyMultinest Solve() for documentation
"""
from pymultinest.solve import run, Analyzer
kwargs['n_dims'] = n_dims
files_temporary = False
if 'outputfiles_basename' not in kwargs:
files_temporary = True
tempdir = tempfile.mkdtemp('pymultinest')
kwargs['outputfiles_basename'] = tempdir + '/'
outputfiles_basename = kwargs['outputfiles_basename']
def SafePrior(cube, ndim, nparams):
a = np.array([cube[i] for i in range(n_dims)])
b = Prior(a)
for i in range(n_dims):
cube[i] = b[i]
def SafeLoglikelihood(cube, ndim, nparams, lnew):
a = np.array([cube[i] for i in range(n_dims)])
likelihood = float(LogLikelihood(a))
if not np.isfinite(likelihood):
warn(f'WARNING: loglikelihood not finite: {likelihood}\n'
f'for parameters {a}, returned very low value instead')
return -dddm.statistics.LL_LOW_BOUND
return likelihood
kwargs['LogLikelihood'] = SafeLoglikelihood
kwargs['Prior'] = SafePrior
run(**kwargs)
analyzer = Analyzer(
n_dims, outputfiles_basename=outputfiles_basename)
try:
stats = analyzer.get_stats()
except ValueError as e:
# This can happen during testing if we limit the number of iterations
warn(f'Cannot load output file: {e}')
stats = {'nested sampling global log-evidence': -1,
'nested sampling global log-evidence error': -1
}
samples = analyzer.get_equal_weighted_posterior()[:, :-1]
return dict(logZ=stats['nested sampling global log-evidence'],
logZerr=stats['nested sampling global log-evidence error'],
samples=samples,
)
```
#### File: dddm/tests/test_experiment_class.py
```python
from unittest import TestCase
import dddm
class TestExperimentClass(TestCase):
"""See if we can init the Experiment class as we expect"""
def test_dummy_init(self):
class DummyExperiment(dddm.Experiment):
pass
dummy_experiment = DummyExperiment()
# dummy_experiment._check_class()
def test_incomplete_init(self):
class IncompleteExperiment(dddm.Experiment):
pass
incomplete = IncompleteExperiment()
with self.assertRaises(NotImplementedError):
incomplete._check_class()
``` |
{
"source": "JoranAngevaare/strax",
"score": 2
} |
#### File: strax/storage/common.py
```python
from ast import literal_eval
from concurrent.futures import wait
import logging
from packaging import version
import time
import typing
import warnings
import numpy as np
import strax
export, __all__ = strax.exporter()
@export
class DataKey:
"""Request for data to a storage registry
Instances of this class uniquely identify a single piece of strax data
abstractly -- that is, it describes the full history of algorithms that
have to be run to reproduce it.
It is used for communication between the main Context class and storage
frontends.
"""
run_id: str
data_type: str
lineage: dict
# Do NOT use directly, use the lineage_hash method
_lineage_hash = ''
def __init__(self, run_id, data_type, lineage):
self.run_id = run_id
self.data_type = data_type
self.lineage = lineage
def __repr__(self):
return '-'.join([self.run_id, self.data_type, self.lineage_hash])
@property
def lineage_hash(self):
"""Deterministic hash of the lineage"""
# We cache the hash computation to benefit tight loops calling
# this property
if self._lineage_hash == '':
self._lineage_hash = strax.deterministic_hash(self.lineage)
return self._lineage_hash
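# Hedged usage sketch (added): a DataKey is just (run_id, data_type, lineage);
# its repr and lineage_hash identify a piece of data deterministically. The
# lineage below is a made-up example, not a real plugin lineage.
# >>> key = DataKey('170621_0617', 'peaks',
# ...               {'peaks': ('Peaks', '0.1.2', {'some_option': 1})})
# >>> key.lineage_hash   # deterministic hash string of the lineage dict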
@export
class DataNotAvailable(Exception):
"""Raised when requested data is not available"""
pass
@export
class EmptyDataWarning(UserWarning):
pass
@export
class DataExistsError(Exception):
"""Raised when attempting to write a piece of data
that is already written"""
def __init__(self, at, message=''):
super().__init__(message)
self.at = at
@export
class DataCorrupted(Exception):
pass
@export
class RunMetadataNotAvailable(Exception):
pass
@export
class StorageFrontend:
"""Interface to something that knows data-locations and run-level metadata.
For example, a runs database, or a data directory on the file system.
"""
backends: list
can_define_runs = False
provide_run_metadata = False
def __init__(self,
readonly=False,
provide_run_metadata=None,
overwrite='if_broken',
take_only=tuple(),
exclude=tuple()):
"""
:param readonly: If True, throws CannotWriteData whenever saving is
attempted.
:param overwrite: When to overwrite data that already exists.
- 'never': Never overwrite any data.
- 'if_broken': Only overwrites data if it is incomplete or broken.
- 'always': Always overwrite data. Use with caution!
:param take_only: Provide/accept only these data types.
:param exclude: Do NOT provide/accept these data types.
:param provide_run_metadata: Whether to provide run-level metadata
(run docs). If None, use class-specific default
If take_only and exclude are both omitted, provide all data types.
If a data type is listed in both, it will not be provided.
Attempting to read/write unwanted data types throws DataTypeNotWanted.
"""
if overwrite not in 'never if_broken always'.split():
raise RuntimeError(f"Invalid 'overwrite' setting {overwrite}. ")
self.take_only = strax.to_str_tuple(take_only)
self.exclude = strax.to_str_tuple(exclude)
self.overwrite = overwrite
if provide_run_metadata is not None:
self.provide_run_metadata = provide_run_metadata
self.readonly = readonly
self.log = logging.getLogger(self.__class__.__name__)
def loader(self, key: DataKey,
time_range=None,
allow_incomplete=False,
fuzzy_for=tuple(),
fuzzy_for_options=tuple(),
chunk_number=None,
executor=None):
"""Return loader for data described by DataKey.
:param key: DataKey describing data
:param time_range: 2-length arraylike of (start, exclusive end)
of row numbers to get. Default is None, which means get the entire
run.
:param allow_incomplete: Allow loading of data which has not been
completely written to disk yet.
:param fuzzy_for: list/tuple of plugin names for which no
plugin name, version, or option check is performed.
:param fuzzy_for_options: list/tuple of configuration options for which
no check is performed.
:param chunk_number: Chunk number to load exclusively.
:param executor: Executor for pushing load computation to
"""
backend, backend_key = self.find(key,
write=False,
allow_incomplete=allow_incomplete,
fuzzy_for=fuzzy_for,
fuzzy_for_options=fuzzy_for_options)
return self._get_backend(backend).loader(
backend_key,
time_range=time_range,
executor=executor,
chunk_number=chunk_number)
def saver(self, key, metadata):
"""Return saver for data described by DataKey."""
backend, backend_key = self.find(key, write=True)
return self._get_backend(backend).saver(backend_key,
metadata)
def get_metadata(self, key,
allow_incomplete=False,
fuzzy_for=tuple(),
fuzzy_for_options=tuple()):
"""Retrieve data-level metadata for the specified key.
Other parameters are the same as for .find
"""
backend, backend_key = self.find(key,
write=False,
check_broken=False,
allow_incomplete=allow_incomplete,
fuzzy_for=fuzzy_for,
fuzzy_for_options=fuzzy_for_options)
return self._get_backend(backend).get_metadata(backend_key)
def _we_take(self, data_type):
"""Return if data_type can be provided by this frontend"""
return not (data_type in self.exclude
or self.take_only and data_type not in self.take_only)
def find(self, key: DataKey,
write=False,
check_broken=True,
allow_incomplete=False,
fuzzy_for=tuple(), fuzzy_for_options=tuple()):
"""Return (str: backend class name, backend-specific) key
to get at / write data, or raise exception.
:param key: DataKey of data to load
{data_type: (plugin_name, version, {config_option: value, ...}, ...}
:param write: Set to True if writing new data. The data is immediately
registered, so you must follow up on the write!
:param check_broken: If True, raise DataNotAvailable if data has not
        been completely written, or writing terminated with an exception.
"""
message = (
f"\nRequested lineage: {key.lineage}."
f"\nIgnoring plugin lineage for: {fuzzy_for}."
f"\nIgnoring config options: {fuzzy_for}.")
if not self._we_take(key.data_type):
raise DataNotAvailable(
f"{self} does not accept or provide data type {key.data_type}")
if write:
if self.readonly:
raise DataNotAvailable(f"{self} cannot write any-data, "
"it's readonly")
try:
at = self.find(key, write=False,
allow_incomplete=allow_incomplete,
fuzzy_for=fuzzy_for,
fuzzy_for_options=fuzzy_for_options)
raise DataExistsError(
at=at,
message=(f"Data already exists at {at}.\n"
+ message))
except DataNotAvailable:
pass
try:
backend_name, backend_key = self._find(
key=key,
write=write,
allow_incomplete=allow_incomplete,
fuzzy_for=fuzzy_for,
fuzzy_for_options=fuzzy_for_options)
except DataNotAvailable:
raise DataNotAvailable(
f"{key.data_type} for {key.run_id} not available." + message)
if not write and check_broken:
# Get the metadata to check if the data is broken
meta = self._get_backend(backend_name).get_metadata(backend_key)
if 'exception' in meta:
exc = meta['exception']
raise DataNotAvailable(
f"Data in {backend_name} {backend_key} corrupted due to "
f"exception during writing: {exc}.")
if 'writing_ended' not in meta and not allow_incomplete:
raise DataNotAvailable(
f"Data in {backend_name} {backend_key} corrupted. No "
f"writing_ended field present!")
return backend_name, backend_key
def _get_backend(self, backend):
for b in self.backends:
if b.__class__.__name__ == backend:
return b
raise KeyError(f"Unknown storage backend {backend} specified")
def _matches(self, lineage: dict, desired_lineage: dict,
fuzzy_for: tuple, fuzzy_for_options: tuple):
"""Return if lineage matches desired_lineage given ignore options
"""
if not (fuzzy_for or fuzzy_for_options):
return lineage == desired_lineage
args = [fuzzy_for, fuzzy_for_options]
return (
self._filter_lineage(lineage, *args)
== self._filter_lineage(desired_lineage, *args))
@staticmethod
def _filter_lineage(lineage, fuzzy_for, fuzzy_for_options):
"""Return lineage without parts to be ignored in matching"""
return {data_type: (v[0],
v[1],
{option_name: b
for option_name, b in v[2].items()
if option_name not in fuzzy_for_options})
for data_type, v in lineage.items()
if data_type not in fuzzy_for}
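    # Illustrative note (added): with fuzzy_for_options=('gain',), a lineage
    # entry {'peaks': ('Peaks', '0.0.1', {'gain': 2, 'threshold': 5})} is
    # reduced to {'peaks': ('Peaks', '0.0.1', {'threshold': 5})} before the
    # comparison, so differing 'gain' settings still count as a match.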
def _can_overwrite(self, key: DataKey):
if self.overwrite == 'always':
return True
if self.overwrite == 'if_broken':
metadata = self.get_metadata(key)
return not ('writing_ended' in metadata
and 'exception' not in metadata)
return False
def find_several(self, keys, **kwargs):
"""Return list with backend keys or False
for several data keys.
Options are as for find()
"""
# You can override this if the backend has a smarter way
# of checking availability (e.g. a single DB query)
result = []
for key in keys:
try:
r = self.find(key, **kwargs)
except (strax.DataNotAvailable,
strax.DataCorrupted):
r = False
result.append(r)
return result
def define_run(self, name, sub_run_spec, **metadata):
self.write_run_metadata(name, dict(
sub_run_spec=sub_run_spec,
**metadata))
##
# Abstract methods (to override in child)
##
def _scan_runs(self, store_fields):
"""Iterable of run document / metadata dictionaries
"""
yield from tuple()
def _find(self, key: DataKey,
write, allow_incomplete, fuzzy_for, fuzzy_for_options):
"""Return backend key (e.g. for filename) for data identified by key,
raise DataNotAvailable, or DataExistsError
Parameters are as for find.
"""
# Use the self._matches attribute to compare lineages according to
# the fuzzy options
raise NotImplementedError
def run_metadata(self, run_id, projection=None):
"""Return run metadata dictionary, or raise RunMetadataNotAvailable"""
raise NotImplementedError
def write_run_metadata(self, run_id, metadata):
"""Stores metadata for run_id. Silently overwrites any previously
stored run-level metadata."""
raise NotImplementedError
def remove(self, key):
"""Removes a registration. Does not delete any actual data"""
raise NotImplementedError
@export
class StorageBackend:
"""Storage backend for strax data.
This is a 'dumb' interface to data. Each bit of data stored is described
by backend-specific keys (e.g. directory names).
Finding and assigning backend keys is the responsibility of the
StorageFrontend.
The backend class name + backend_key must together uniquely identify a
piece of data. So don't make __init__ take options like 'path' or 'host',
these have to be hardcoded (or made part of the key).
"""
def loader(self,
backend_key,
time_range=None,
chunk_number=None,
executor=None):
"""Iterates over strax data in backend_key
:param time_range: 2-length arraylike of (start, exclusive end)
of desired data. Will return all data that partially overlaps with
the range.
        Default is None, which means get the entire run.
:param chunk_number: Chunk number to get exclusively
:param executor: Executor to push load/decompress operations to
"""
metadata = self.get_metadata(backend_key)
if 'strax_version' in metadata:
v_old = metadata['strax_version']
if version.parse(v_old) < version.parse('0.9.0'):
raise strax.DataNotAvailable(
f"Cannot load data at {backend_key}: "
f"it was created with strax {v_old}, "
f"but you have strax {strax.__version__}. ")
else:
warnings.warn(f"Data at {backend_key} does not say what strax "
"version it was generated with. This means it is "
"corrupted, or very, very old. Probably "
"we cannot load this.")
# 'start' and 'end' are not required, to allow allow_incomplete
required_fields = (
'run_id data_type data_kind dtype compressor').split()
missing_fields = [x for x in required_fields if x not in metadata]
if len(missing_fields):
raise strax.DataNotAvailable(
f"Cannot load data at {backend_key}: metadata is "
f"missing the required fields {missing_fields}. ")
if not len(metadata['chunks']):
raise ValueError(
f"Cannot load data at {backend_key}, it has no chunks!")
dtype = literal_eval(metadata['dtype'])
# Common arguments for chunk construction, not stored with chunk-level
# metadata
chunk_kwargs = dict(
data_type=metadata['data_type'],
data_kind=metadata['data_kind'],
dtype=dtype,
target_size_mb=metadata.get('chunk_target_size_mb',
strax.default_chunk_size_mb))
required_chunk_metadata_fields = 'start end run_id'.split()
for i, chunk_info in enumerate(strax.iter_chunk_meta(metadata)):
missing_fields = [x for x in required_chunk_metadata_fields
if x not in chunk_info]
if len(missing_fields):
raise ValueError(
f"Error reading chunk {i} of {metadata['dtype']} "
f"of {metadata['run_d']} from {backend_key}: "
f"chunk metadata is missing fields {missing_fields}")
# Chunk number constraint
if chunk_number is not None:
if i != chunk_number:
continue
# Time constraint
if time_range:
if (chunk_info['end'] <= time_range[0]
or time_range[1] <= chunk_info['start']):
# Chunk does not cover any part of range
continue
read_chunk_kwargs = dict(
backend_key=backend_key,
dtype=dtype,
metadata=metadata,
chunk_info=chunk_info,
time_range=time_range,
chunk_construction_kwargs=chunk_kwargs)
if executor is None:
yield self._read_and_format_chunk(**read_chunk_kwargs)
else:
yield executor.submit(self._read_and_format_chunk,
**read_chunk_kwargs)
def _read_and_format_chunk(self,
*,
backend_key,
dtype,
metadata,
chunk_info,
time_range,
chunk_construction_kwargs) -> strax.Chunk:
if chunk_info['n'] == 0:
# No data, no need to load
data = np.empty(0, dtype=dtype)
else:
data = self._read_chunk(backend_key,
chunk_info=chunk_info,
dtype=dtype,
compressor=metadata['compressor'])
result = strax.Chunk(
start=chunk_info['start'],
end=chunk_info['end'],
run_id=chunk_info['run_id'],
data=data,
**chunk_construction_kwargs)
if time_range:
if result.start < time_range[0]:
_, result = result.split(t=time_range[0],
allow_early_split=True)
if result.end > time_range[1]:
try:
result, _ = result.split(t=time_range[1],
allow_early_split=False)
except strax.CannotSplit:
pass
return result
def saver(self, key, metadata):
"""Return saver for data described by key"""
metadata.setdefault('compressor', 'blosc') # TODO wrong place?
metadata['strax_version'] = strax.__version__
if 'dtype' in metadata:
metadata['dtype'] = metadata['dtype'].descr.__repr__()
return self._saver(key, metadata)
##
# Abstract methods (to override in child)
##
def get_metadata(self, backend_key):
"""Return metadata of data described by key.
"""
raise NotImplementedError
def _read_chunk(self, backend_key, chunk_info, dtype, compressor):
"""Return a single data chunk"""
raise NotImplementedError
def _saver(self, key, metadata):
raise NotImplementedError
@export # Needed for type hints elsewhere
class Saver:
"""Interface for saving a data type
Must work even if forked.
Do NOT add unpickleable things as attributes (such as loggers)!
"""
closed = False
allow_rechunk = True # If False, do not rechunk even if plugin allows it
allow_fork = True # If False, cannot be inlined / forked
# This is set if the saver is operating in multiple processes at once
# Do not set it yourself
is_forked = False
got_exception = None
def __init__(self, metadata):
self.md = metadata
self.md['writing_started'] = time.time()
self.md['chunks'] = []
def save_from(self, source: typing.Generator, rechunk=True, executor=None):
"""Iterate over source and save the results under key
along with metadata
"""
pending = []
exhausted = False
chunk_i = 0
try:
while not exhausted:
chunk = None
try:
if rechunk and self.allow_rechunk:
while (chunk is None or
chunk.data.nbytes < chunk.target_size_mb*1e6):
chunk = strax.Chunk.concatenate(
[chunk, next(source)])
else:
chunk = next(source)
except StopIteration:
exhausted = True
if chunk is None:
break
new_f = self.save(chunk=chunk,
chunk_i=chunk_i, executor=executor)
pending = [f for f in pending if not f.done()]
if new_f is not None:
pending += [new_f]
chunk_i += 1
except strax.MailboxKilled:
# Write exception (with close), but exit gracefully.
# One traceback on screen is enough
self.close(wait_for=pending)
pass
except Exception as e:
# log exception for the final check
self.got_exception = e
# Throw the exception back into the mailbox
# (hoping that it is still listening...)
source.throw(e)
raise e
finally:
if not self.closed:
self.close(wait_for=pending)
def save(self, chunk: strax.Chunk, chunk_i: int, executor=None):
"""Save a chunk, returning future to wait on or None"""
if self.closed:
raise RuntimeError(f"Attempt to save to {self.md} saver, "
f"which is already closed!")
chunk_info = dict(chunk_i=chunk_i,
n=len(chunk),
start=chunk.start,
end=chunk.end,
run_id=chunk.run_id,
nbytes=chunk.nbytes)
if len(chunk) != 0 and 'time' in chunk.dtype.names:
for desc, i in (('first', 0), ('last', -1)):
chunk_info[f'{desc}_time'] = \
int(chunk.data[i]['time'])
chunk_info[f'{desc}_endtime'] = \
int(strax.endtime(chunk.data[i]))
if len(chunk):
bonus_info, future = self._save_chunk(
chunk.data,
chunk_info,
executor=None if self.is_forked else executor)
chunk_info.update(bonus_info)
else:
# No need to create an empty file for an empty chunk;
# the annotation in the metadata is sufficient.
future = None
self._save_chunk_metadata(chunk_info)
return future
def close(self,
wait_for: typing.Union[list, tuple] = tuple(),
timeout=300):
if self.closed:
raise RuntimeError(f"{self.md} saver already closed")
if wait_for:
done, not_done = wait(wait_for, timeout=timeout)
if len(not_done):
raise RuntimeError(
f"{len(not_done)} futures of {self.md} did not "
"complete in time!")
self.closed = True
exc_info = strax.formatted_exception()
if exc_info:
self.md['exception'] = exc_info
if self.md['chunks']:
# Update to precise start and end values
self.md['start'] = self.md['chunks'][0]['start']
self.md['end'] = self.md['chunks'][-1]['end']
# If there were no chunks, we are certainly crashing.
# Don't throw another exception
self.md['writing_ended'] = time.time()
self._close()
##
# Abstract methods (to override in child)
##
def _save_chunk(self, data, chunk_info, executor=None):
"""Save a chunk to file. Return (
dict with extra info for metadata,
future to wait on or None)
"""
raise NotImplementedError
def _save_chunk_metadata(self, chunk_info):
raise NotImplementedError
def _close(self):
raise NotImplementedError
```
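The abstract methods at the end of `Saver` are the full contract a concrete backend has to implement. A minimal in-memory sketch (hypothetical, not an official strax backend; it only assumes the base `Saver` class shown above) could look like this:
```python
import strax


class InMemorySaver(strax.Saver):
    """Hypothetical Saver that keeps chunk data in a plain dict."""

    def __init__(self, metadata):
        super().__init__(metadata)
        self.chunks = {}                      # chunk_i -> numpy array

    def _save_chunk(self, data, chunk_info, executor=None):
        # Keep a copy of the chunk; nothing asynchronous, so no future to return.
        self.chunks[chunk_info['chunk_i']] = data.copy()
        return dict(), None

    def _save_chunk_metadata(self, chunk_info):
        # Record per-chunk info so close() can fill in precise start/end times.
        self.md['chunks'].append(chunk_info)

    def _close(self):
        # Nothing to flush for an in-memory store.
        pass
```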
#### File: strax/tests/test_overlap_plugin.py
```python
from strax import testutils
import numpy as np
from hypothesis import given, strategies, example, settings
import strax
@given(testutils.disjoint_sorted_intervals.filter(lambda x: len(x) > 0),
strategies.integers(min_value=0, max_value=3))
@settings(deadline=None)
# Examples that trigger issue #49
@example(
input_peaks=np.array(
[(0, 1, 1, 0), (1, 10, 1, 0), (11, 1, 1, 0)],
dtype=strax.interval_dtype),
split_i=2)
@example(
input_peaks=np.array(
[(0, 1, 1, 0), (1, 1, 1, 0), (2, 9, 1, 0), (11, 1, 1, 0)],
dtype=strax.interval_dtype),
split_i=3)
# Other example that caused failures at some point
@example(
input_peaks=np.array(
[(0, 1, 1, 0), (7, 6, 1, 0), (13, 1, 1, 0)],
dtype=strax.interval_dtype),
split_i=2)
def test_overlap_plugin(input_peaks, split_i):
"""Counting the number of nearby peaks should not depend on how peaks are
chunked.
"""
chunks = np.split(input_peaks, [split_i])
chunks = [c for c in chunks if not len(c) == 0]
class Peaks(strax.Plugin):
depends_on = tuple()
dtype = strax.interval_dtype
def compute(self, chunk_i):
data = chunks[chunk_i]
return self.chunk(
data=data,
start=int(data[0]['time']),
end=int(strax.endtime(data[-1])))
# Hack to make peak output stop after a few chunks
def is_ready(self, chunk_i):
return chunk_i < len(chunks)
def source_finished(self):
return True
window = 10
# Note we must apply this to endtime, not time, since
# peaks straddling the overlap threshold are assigned to the NEXT window.
# If we used time it would fail on examples with peaks larger than window.
# In real life, the window should simply be chosen large enough that this
# is not an issue.
def count_in_window(ts, w=window):
# Terribly inefficient algorithm...
result = np.zeros(len(ts), dtype=np.int16)
for i, t in enumerate(ts):
result[i] = ((ts < t + w) & (ts > t - w)).sum()
return result
class WithinWindow(strax.OverlapWindowPlugin):
depends_on = ('peaks',)
dtype = [('n_within_window', np.int16)] + strax.time_fields
def get_window_size(self):
return window
def compute(self, peaks):
return dict(
n_within_window=count_in_window(strax.endtime(peaks)),
time=peaks['time'][:1],
endtime=strax.endtime(peaks)[-1:])
st = strax.Context(storage=[])
st.register(Peaks)
st.register(WithinWindow)
result = st.get_array(run_id='some_run', targets='within_window')
expected = count_in_window(strax.endtime(input_peaks))
assert len(expected) == len(input_peaks), "Expected one count per input peak"
assert isinstance(result, np.ndarray), "Did not get an array"
assert len(result) == len(expected), "Result has wrong length"
np.testing.assert_equal(result['n_within_window'], expected,
"Counting went wrong")
``` |
{
"source": "jorana/straxen",
"score": 2
} |
#### File: straxen/plugins/event_shadow.py
```python
import numpy as np
import strax
import numba
export, __all__ = strax.exporter()
@export
@strax.takes_config(
strax.Option('pre_s2_area_threshold', default=1000,
help='Only take S2s larger than this into account when calculating EventShadow.'),
strax.Option('time_window_backward', default=int(3e9),
help='Search for S2s causing shadow in this time window'))
class EventShadow(strax.Plugin):
"""
This plugin finds and calculates the previous S2 shadow at event level,
with the backward time window and the previous S2 area threshold as options.
It also gives the area and position information of these previous S2s.
"""
__version__ = '0.0.6'
depends_on = ('event_basics','peak_basics','peak_positions')
provides = 'event_shadow'
save_when = strax.SaveWhen.EXPLICIT
def infer_dtype(self):
dtype = [
('pre_s2_area', np.float32,'previous s2 area [PE]'),
('shadow_dt', np.int64, 'time difference to the previous s2 [ns]'),
('shadow', np.float32,'previous s2 shadow [PE/ns]'),
('pre_s2_x', np.float32,'x of previous s2 peak causing shadow [cm]'),
('pre_s2_y', np.float32,'y of previous s2 peak causing shadow [cm]'),
('shadow_distance', np.float32,'distance to the previous s2 peak causing the max shadow [cm]')
]
dtype += strax.time_fields
return dtype
def compute(self, events, peaks):
roi_dt = np.dtype([(('Start of the backward search window', 'time'), int),
(('End of the window (event start)', 'endtime'), int)])
roi = np.zeros(len(events), dtype=roi_dt)
n_seconds = self.config['time_window_backward']
roi['time'] = events['time'] - n_seconds
roi['endtime'] = events['time']
mask_s2 = peaks['type'] == 2
mask_s2 &= peaks['area'] > self.config['pre_s2_area_threshold']
split_peaks = strax.split_touching_windows(peaks[mask_s2], roi)
res = np.zeros(len(events), self.dtype)
compute_shadow(events, split_peaks, res)
res['shadow_distance'] = ((res['pre_s2_x'] - events['s2_x'])**2+(res['pre_s2_y'] - events['s2_y'])**2)**0.5
res['time'] = events['time']
res['endtime'] = strax.endtime(events)
return res
def compute_shadow(events, split_peaks, res):
if len(res):
return _compute_shadow(events, split_peaks, res)
@numba.njit(cache=True)
def _compute_shadow(events, split_peaks, res):
for event_i, event_a in enumerate(events):
new_shadow = 0
for peak_i, peak_a in enumerate(split_peaks[event_i]):
new_shadow = peak_a['area']/(event_a['s2_center_time']-peak_a['center_time'])
if new_shadow > res['shadow'][event_i]:
res['pre_s2_area'][event_i] = peak_a['area']
res['shadow_dt'][event_i] = event_a['s2_center_time']-peak_a['center_time']
res['pre_s2_x'][event_i] = peak_a['x']
res['pre_s2_y'][event_i] = peak_a['y']
res['shadow'][event_i] = new_shadow
``` |
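The quantity stored in `shadow` is simply the previous S2 area divided by the time difference to the event's S2, and the plugin keeps the peak for which this ratio is largest. A toy restatement of that bookkeeping on plain arrays (hypothetical numbers, independent of strax):
```python
import numpy as np

event_s2_center_time = 1_000_000      # ns, assumed center time of the event's S2
prev_s2 = np.array([(400_000, 5e4), (900_000, 2e3)],
                   dtype=[('center_time', np.int64), ('area', np.float32)])

shadow = 0.0
for peak in prev_s2:
    candidate = peak['area'] / (event_s2_center_time - peak['center_time'])
    shadow = max(shadow, candidate)   # keep the peak casting the largest shadow

print(f"max shadow: {shadow:.3f} PE/ns")
```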
{
"source": "jorana/verne",
"score": 2
} |
#### File: verne/plotting/PlotVelDists_gamma.py
```python
import sys
sys.path.append("../src/")
import numpy as np
import MaxwellBoltzmann as MB
from numpy import pi
from scipy.integrate import quad, trapz
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import utils
from scipy.interpolate import interp2d, interp1d
from matplotlib import cm
#Matplotlib ------------
import matplotlib as mpl
font = { 'size' : 16, 'family':'serif'}
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rc('font', **font)
import matplotlib.pyplot as pl
#------------------------
#Threshold velocity
v_th = MB.vmin(10.0, 73.0, m_x=1e5)
#Load velocity distribution from file
def getVelDist(lsigstr, gamma_ind):
Ngamvals = 11
Nvvals = 61
rowvals = gamma_ind*61,
gamma_vals1, vvals1, fvals1 = np.loadtxt("../results/veldists/f_SUF_lmx5.0_lsig" + lsigstr + ".txt", unpack=True)
vvals = vvals1[gamma_ind*61:(gamma_ind+1)*61]
fvals = fvals1[gamma_ind*61:(gamma_ind+1)*61]
return vvals, fvals
v1 = np.linspace(0, 800, 100)
pl.figure()
ax1 = pl.gca()
cm_subsection = np.linspace(0.0, 1.0, 11)
col_list = [ cm.viridis(x) for x in cm_subsection ]
ax1.plot(v1, 1e3*MB.calcf_SHM(v1),'k--',linewidth=1.5)
s = "-29.60"
ax1.fill_between(np.linspace(0, v_th,100),0, 5,color='grey', alpha = 0.5, hatch="\\")
for i in range(10,-1, -1):
v, f = getVelDist(s, i)
ax1.plot(v, 1e3*f, linewidth=2.0, color=col_list[i], label=" ")
ax1.set_xlabel(r'$v_f\, \,[\mathrm{km/s}]$',fontsize=20.0)
ax1.set_ylabel(r'$\tilde{f}(v_f) \,\,[10^{-3} \,\mathrm{s/km}]$',fontsize=20.0)
ax1.set_ylim(0, 5)
ax1.yaxis.set_minor_locator(MultipleLocator(0.25))
ax1.xaxis.set_minor_locator(MultipleLocator(50))
m_x = 1e5
sigma_p = 10**(-29.6)
pl.text(30,5*625/800.0, r"$m_\chi = $" + utils.sciformat(m_x) + r" $\mathrm{GeV}$" +\
"\n" + r"$\sigma_p^{\mathrm{SI}} = $" + utils.sciformat_1(sigma_p) + r" $\mathrm{cm}^{2}$" + \
"\nSUF (d = 10.6m)",\
bbox=dict(boxstyle='round', facecolor='white', alpha=1.0) )
pl.text(375, 3.9, r"Average DM flux from...", fontsize=12.0)
pl.text(610, 4.55, r"above $(\gamma = 180^\circ)$", fontsize=12.0)
pl.text(620, 3.3, r"below $(\gamma = 0^\circ)$", fontsize=12.0)
pl.legend(loc=[0.8, 0.70], fontsize=6.0,labelspacing=0.001, handlelength=10.0, frameon=False)
pl.savefig('../plots/SpeedDists_gamma.pdf', bbox_inches='tight')
pl.show()
```
#### File: verne/plotting/PlotVelDists_xsec_CDMS.py
```python
import sys
sys.path.append("../src/")
import numpy as np
from numpy import pi
import MaxwellBoltzmann as MB
from scipy.integrate import quad, trapz
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import utils
from scipy.interpolate import interp1d
from matplotlib import cm
#Matplotlib ------------
import matplotlib as mpl
font = { 'size' : 16, 'family':'serif'}
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rc('font', **font)
import matplotlib.pyplot as pl
#------------------------
#Threshold velocity
v_th = MB.vmin(10.0, 73.0, m_x=1e5)
#Load velocity distribution from file
def getVelDist(lsigstr, gamma_ind):
Ngamvals = 11
Nvvals = 61
rowvals = gamma_ind*61,
gamma_vals1, vvals1, fvals1 = np.loadtxt("../results/veldists/f_SUF_lmx5.0_lsig" + lsigstr + ".txt", unpack=True)
vvals = vvals1[gamma_ind*61:(gamma_ind+1)*61]
fvals = fvals1[gamma_ind*61:(gamma_ind+1)*61]
return vvals, fvals
v1 = np.linspace(0, 800, 100)
pl.figure()
ax1 = pl.gca()
ax1.fill_between(np.linspace(0, v_th,100),0, 5,color='grey', alpha = 0.5, hatch="\\")
siglist = np.asarray([1e-30, 1e-29, 4e-29, 6.3e-29,1e-28, 1.3e-28])
cm_subsection = np.linspace(0.0, 0.85, len(siglist))
col_list = [ cm.Set1(x) for x in cm_subsection ]
ax1.plot(v1, MB.calcf_SHM(v1),'k--',linewidth=1.5)
for i,sig in enumerate(siglist):
v, f = getVelDist("%.2f"%(np.log10(sig),), 7)
ax1.plot(v, f, linewidth=2.0, color=col_list[i],label=str(int(sig*1e30)))
ax1.set_xlabel(r'$v_f\, \,[\mathrm{km/s}]$',fontsize=20.0)
ax1.set_ylabel(r'$\tilde{f}(v_f) \,\,[\mathrm{s/km}]$',fontsize=20.0)
ax1.set_ylim(1e-7, 1e0)
ax1.yaxis.set_minor_locator(MultipleLocator(0.25))
ax1.xaxis.set_minor_locator(MultipleLocator(50))
pl.text(30,4e-2, r"$m_\chi = $" + utils.sciformat(1e5) + r" $\mathrm{GeV}$" +\
"\n" + r"$\gamma = 126^\circ$" + \
"\nSUF (d = 10.6m)",\
bbox=dict(boxstyle='round', facecolor='white', alpha=1.0) )
ax1.set_yscale("log")
pl.text(425, 3e-1, r"$\sigma_p^\mathrm{SI} = 10^{-30} \,\,\mathrm{cm}^2 \times$", fontsize=18.0)
pl.legend(loc='upper right',markerfirst=False,fontsize=14.0,frameon=False)
pl.savefig('../plots/SpeedDists_xsec_CDMS.pdf', bbox_inches='tight')
pl.show()
``` |
{
"source": "jorana/WFSim",
"score": 2
} |
#### File: WFSim/wfsim/strax_interface.py
```python
import logging
import uproot
import nestpy
import numpy as np
import pandas as pd
import strax
from straxen.common import get_resource
from straxen import get_to_pe
from .core import RawData
export, __all__ = strax.exporter()
__all__ += ['instruction_dtype', 'truth_extra_dtype']
instruction_dtype = [
('event_number', np.int32),
('type', np.int8),
('time', np.int64),
('x', np.float32),
('y', np.float32),
('z', np.float32),
('amp', np.int32),
('recoil', '<U2')]
# use explicit np.float64 (np.float is a deprecated alias for the builtin float)
truth_extra_dtype = [
('n_electron', np.float64),
('n_photon', np.float64), ('n_photon_bottom', np.float64),
('t_first_photon', np.float64), ('t_last_photon', np.float64),
('t_mean_photon', np.float64), ('t_sigma_photon', np.float64),
('t_first_electron', np.float64), ('t_last_electron', np.float64),
('t_mean_electron', np.float64), ('t_sigma_electron', np.float64), ('endtime', np.int64)]
log = logging.getLogger('SimulationCore')
@export
def rand_instructions(c):
n = c['nevents'] = c['event_rate'] * c['chunk_size'] * c['nchunk']
c['total_time'] = c['chunk_size'] * c['nchunk']
instructions = np.zeros(2 * n, dtype=instruction_dtype)
uniform_times = c['total_time'] * (np.arange(n) + 0.5) / n
instructions['time'] = np.repeat(uniform_times, 2) * int(1e9)
instructions['event_number'] = np.digitize(instructions['time'],
1e9 * np.arange(c['nchunk']) * c['chunk_size']) - 1
instructions['type'] = np.tile([1, 2], n)
instructions['recoil'] = ['er' for i in range(n * 2)]
r = np.sqrt(np.random.uniform(0, 2500, n))
t = np.random.uniform(-np.pi, np.pi, n)
instructions['x'] = np.repeat(r * np.cos(t), 2)
instructions['y'] = np.repeat(r * np.sin(t), 2)
instructions['z'] = np.repeat(np.random.uniform(-100, 0, n), 2)
nphotons = np.random.uniform(2000, 2050, n)
nelectrons = 10 ** (np.random.uniform(3, 4, n))
instructions['amp'] = np.vstack([nphotons, nelectrons]).T.flatten().astype(int)
return instructions
@export
def read_g4(file):
nc = nestpy.NESTcalc(nestpy.VDetector())
A = 131.293
Z = 54.
density = 2.862 # g/cm^3 #SR1 Value
drift_field = 82 # V/cm #SR1 Value
interaction = nestpy.INTERACTION_TYPE(7)
data = uproot.open(file)
all_ttrees = dict(data.allitems(filterclass=lambda cls: issubclass(cls, uproot.tree.TTreeMethods)))
e = all_ttrees[b'events/events;1']
time = e.array('time')
n_events = len(e.array('time'))
#lets separate the events in time by a constant time difference
time = time+np.arange(n_events)
#Events should be in the TPC
xp = e.array("xp") / 10
yp = e.array("yp") /10
zp = e.array("zp") /10
e_dep = e.array('ed')
tpc_radius_square = 2500
z_lower = -100
z_upper = 0
TPC_Cut = (zp > z_lower) & (zp < z_upper) & (xp**2+yp**2 <tpc_radius_square)
xp = xp[TPC_Cut]
yp = yp[TPC_Cut]
zp = zp[TPC_Cut]
e_dep = e_dep[TPC_Cut]
time = time[TPC_Cut]
event_number = np.repeat(e.array("eventid"),e.array("nsteps"))[TPC_Cut.flatten()]
n_instructions = len(time.flatten())
ins = np.zeros(2*n_instructions, dtype=instruction_dtype)
# xp, yp, zp were already converted from mm to cm above; do not divide by 10 again
e_dep, ins['x'], ins['y'], ins['z'], ins['time'] = e_dep.flatten(), \
np.repeat(xp.flatten(), 2), \
np.repeat(yp.flatten(), 2), \
np.repeat(zp.flatten(), 2), \
1e9*np.repeat(time.flatten(), 2)
ins['event_number'] = np.repeat(event_number,2)
ins['type'] = np.tile((1, 2), n_instructions)
ins['recoil'] = np.repeat('er', 2 * n_instructions)
quanta = []
for en in e_dep:
y = nc.GetYields(interaction,
en,
density,
drift_field,
A,
Z,
(1, 1))
quanta.append(nc.GetQuanta(y, density).photons)
quanta.append(nc.GetQuanta(y, density).electrons)
ins['amp'] = quanta
#cut interactions without electrons or photons
ins = ins[ins["amp"] > 0]
return ins
@export
def instruction_from_csv(filename):
"""Return wfsim instructions from a csv
:param filename: Path to csv file
"""
# Pandas does not grok the <U2 field 'recoil' correctly.
# Probably it loads it as some kind of string instead...
# we'll get it into the right format in the next step.
dtype_dict = dict(instruction_dtype)
df = pd.read_csv(filename,
names=list(dtype_dict.keys()),
skiprows=1,
dtype={k: v for k, v in dtype_dict.items()
if k != 'recoil'})
# Convert to records and check format
recs = df.to_records(index=False, column_dtypes=dtype_dict)
expected_dtype = np.dtype(instruction_dtype)
assert recs.dtype == expected_dtype, \
f"CSV {filename} produced wrong dtype. Got {recs.dtype}, expected {expected_dtype}."
return recs
@export
class ChunkRawRecords(object):
def __init__(self, config):
self.config = config
self.rawdata = RawData(self.config)
self.record_buffer = np.zeros(5000000, dtype=strax.record_dtype()) # 2*250 ms buffer
self.truth_buffer = np.zeros(10000, dtype=instruction_dtype + truth_extra_dtype + [('fill', bool)])
def __call__(self, instructions):
# Save the constants as privates
samples_per_record = strax.DEFAULT_RECORD_LENGTH
buffer_length = len(self.record_buffer)
dt = self.config['sample_duration']
rext = int(self.config['right_raw_extension'])
cksz = int(self.config['chunk_size'] * 1e9)
self.blevel = buffer_filled_level = 0
self.chunk_time_pre = np.min(instructions['time']) - rext
self.chunk_time = self.chunk_time_pre + cksz # Starting chunk
self.current_digitized_right = self.last_digitized_right = 0
for channel, left, right, data in self.rawdata(instructions, self.truth_buffer):
pulse_length = right - left + 1
records_needed = int(np.ceil(pulse_length / samples_per_record))
if self.rawdata.left * self.config['sample_duration'] > self.chunk_time:
self.chunk_time = self.last_digitized_right * self.config['sample_duration']
yield from self.final_results()
self.chunk_time_pre = self.chunk_time
self.chunk_time += cksz
if self.blevel + records_needed > buffer_length:
log.warning('Chunk size too large, insufficient record buffer')
yield from self.final_results()
if self.blevel + records_needed > buffer_length:
log.warning('Pulse length too large, insufficient record buffer, skipping pulse')
continue
# WARNING baseline and area fields are zeros before final_results
s = slice(self.blevel, self.blevel + records_needed)
self.record_buffer[s]['channel'] = channel
self.record_buffer[s]['dt'] = dt
self.record_buffer[s]['time'] = dt * (left + samples_per_record * np.arange(records_needed))
self.record_buffer[s]['length'] = [min(pulse_length, samples_per_record * (i+1))
- samples_per_record * i for i in range(records_needed)]
self.record_buffer[s]['pulse_length'] = pulse_length
self.record_buffer[s]['record_i'] = np.arange(records_needed)
self.record_buffer[s]['data'] = np.pad(data,
(0, records_needed * samples_per_record - pulse_length), 'constant').reshape((-1, samples_per_record))
self.blevel += records_needed
if self.rawdata.right != self.current_digitized_right:
self.last_digitized_right = self.current_digitized_right
self.current_digitized_right = self.rawdata.right
yield from self.final_results()
def final_results(self):
records = self.record_buffer[:self.blevel] # No copying the records from buffer
maska = records['time'] <= self.last_digitized_right * self.config['sample_duration']
records = records[maska]
records = strax.sort_by_time(records) # Do NOT remove this line
# strax.baseline(records) Will be done w/ pulse processing
strax.integrate(records)
# Yield an appropriate amount of stuff from the truth buffer
# and mark it as available for writing again
maskb = (
self.truth_buffer['fill'] &
# This condition will always be false if self.truth_buffer['t_first_photon'] == np.nan
((self.truth_buffer['t_first_photon']
<= self.last_digitized_right * self.config['sample_duration']) |
# Hence, we need to use this trick to also save these cases (this
# is what we set the end time to for np.nans)
(np.isnan(self.truth_buffer['t_first_photon']) &
(self.truth_buffer['time']
<= self.last_digitized_right * self.config['sample_duration'])
)))
truth = self.truth_buffer[maskb] # This is a copy, not a view!
# Careful here: [maskb]['fill'] = ... does not work
# numpy creates a copy of the array on the first index.
# The assignment then goes to the (unused) copy.
# ['fill'][maskb] leads to a view first, then the advanced
# assignment works into the original array as expected.
self.truth_buffer['fill'][maskb] = False
truth.sort(order='time')
# Return truth without 'fill' field
_truth = np.zeros(len(truth), dtype=instruction_dtype + truth_extra_dtype)
for name in _truth.dtype.names:
_truth[name] = truth[name]
_truth['time'][~np.isnan(_truth['t_first_photon'])] = \
_truth['t_first_photon'][~np.isnan(_truth['t_first_photon'])].astype(int)
yield dict(raw_records=records, truth=_truth)
self.record_buffer[:np.sum(~maska)] = self.record_buffer[:self.blevel][~maska]
self.blevel = np.sum(~maska)
def source_finished(self):
return self.rawdata.source_finished
@strax.takes_config(
strax.Option('fax_file', default=None, track=True,
help="Directory with fax instructions"),
strax.Option('fax_config_override', default=None,
help="Dictionary with configuration option overrides"),
strax.Option('event_rate', default=5, track=False,
help="Average number of events per second"),
strax.Option('chunk_size', default=5, track=False,
help="Duration of each chunk in seconds"),
strax.Option('nchunk', default=4, track=False,
help="Number of chunks to simulate"),
strax.Option('fax_config',
default='https://raw.githubusercontent.com/XENONnT/'
'strax_auxiliary_files/master/fax_files/fax_config_1t.json'),
strax.Option('to_pe_file',
default='https://raw.githubusercontent.com/XENONnT/'
'strax_auxiliary_files/master/to_pe.npy'),
strax.Option('gain_model',
default=('to_pe_per_run',
'https://raw.githubusercontent.com/XENONnT/'
'strax_auxiliary_files/master/to_pe.npy'),
help='PMT gain model. Specify as (model_type, model_config)'),
strax.Option('right_raw_extension', default=50000),
strax.Option('zle_threshold', default=0),
strax.Option('detector',default='XENON1T', track=True),
strax.Option('timeout', default=1800,
help="Terminate processing if any one mailbox receives "
"no result for more than this many seconds"))
class FaxSimulatorPlugin(strax.Plugin):
depends_on = tuple()
# Cannot arbitrarily rechunk records inside events
rechunk_on_save = False
# Simulator uses iteration semantics, so the plugin has a state
# TODO: this seems avoidable...
parallel = False
# TODO: this state is needed for sorting checks,
# but it prevents parallelization
last_chunk_time = -999999999999999
# A very very long input timeout, our simulator takes time
input_timeout = 3600  # an hour
def setup(self):
c = self.config
c.update(get_resource(c['fax_config'], fmt='json'))
# Update gains to the nT defaults
self.to_pe = get_to_pe(self.run_id, ('to_pe_per_run',self.config['to_pe_file']),
len(c['channels_in_detector']['tpc']))
c['gains'] = 1 / self.to_pe * (1e-8 * 2.25 / 2**14) / (1.6e-19 * 10 * 50)
c['gains'][self.to_pe==0] = 0
overrides = self.config['fax_config_override']
if overrides is not None:
c.update(overrides)
if c['fax_file']:
if c['fax_file'][-5:] == '.root':
self.instructions = read_g4(c['fax_file'])
c['nevents'] = np.max(self.instructions['event_number'])
else:
self.instructions = instruction_from_csv(c['fax_file'])
c['nevents'] = np.max(self.instructions['event_number'])
else:
self.instructions = rand_instructions(c)
assert np.all(self.instructions['x']**2 + self.instructions['y']**2 < 2500), \
"Interaction is outside the TPC"
assert np.all(self.instructions['z'] < 0.25) & np.all(self.instructions['z'] > -100), \
"Interaction is outside the TPC"
assert np.all(self.instructions['amp'] > 0), \
"Interaction has zero size"
def _sort_check(self, result):
if len(result) == 0: return
if result['time'][0] < self.last_chunk_time + 1000:
raise RuntimeError(
"Simulator returned chunks with insufficient spacing. "
f"Last chunk's max time was {self.last_chunk_time}, "
f"this chunk's first time is {result['time'][0]}.")
if np.diff(result['time']).min() < 0:
raise RuntimeError("Simulator returned non-sorted records!")
self.last_chunk_time = result['time'].max()
@export
class RawRecordsFromFax(FaxSimulatorPlugin):
provides = ('raw_records', 'truth')
data_kind = {k: k for k in provides}
def setup(self):
super().setup()
self.sim = ChunkRawRecords(self.config)
self.sim_iter = self.sim(self.instructions)
def infer_dtype(self):
dtype = dict(raw_records=strax.record_dtype(),
truth=instruction_dtype + truth_extra_dtype)
return dtype
def is_ready(self, chunk_i):
"""Overwritten to mimic online input plugin.
Returns False to check source finished;
Returns True to get next chunk.
"""
if 'ready' not in self.__dict__: self.ready = False
self.ready ^= True # Flip
return self.ready
def source_finished(self):
"""Return whether all instructions has been used."""
return self.sim.source_finished()
def compute(self, chunk_i):
try:
result = next(self.sim_iter)
except StopIteration:
raise RuntimeError("Bug in chunk count computation")
self._sort_check(result['raw_records'])
return {data_type:self.chunk(
start=self.sim.chunk_time_pre,
end=self.sim.chunk_time,
data=result[data_type],
data_type=data_type) for data_type in self.provides}
``` |
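The core of `ChunkRawRecords.__call__` is the split of each simulated pulse into fixed-length records, padding the last one. A standalone sketch of that splitting on a toy waveform (illustrative numbers, not tied to a real strax record buffer):
```python
import numpy as np

samples_per_record = 110                    # stand-in for strax.DEFAULT_RECORD_LENGTH
pulse = np.arange(250)                      # a 250-sample toy pulse
pulse_length = len(pulse)
records_needed = int(np.ceil(pulse_length / samples_per_record))

# Pad the pulse so it fills an integer number of records, then reshape.
padded = np.pad(pulse, (0, records_needed * samples_per_record - pulse_length),
                'constant')
record_data = padded.reshape(records_needed, samples_per_record)

# Per-record sample counts mirror the 'length' field computed in the plugin.
lengths = [min(pulse_length, samples_per_record * (i + 1)) - samples_per_record * i
           for i in range(records_needed)]
print(records_needed, lengths)              # 3 records: [110, 110, 30]
```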
{
"source": "jorana/wimprates",
"score": 2
} |
#### File: wimprates/wimprates/electron.py
```python
import numericalunits as nu
import numpy as np
from scipy.interpolate import RegularGridInterpolator, interp1d
from scipy.integrate import quad, dblquad
import wimprates as wr
export, __all__ = wr.exporter()
__all__ += ['dme_shells', 'l_to_letter', 'l_to_number']
# Load form factor and construct interpolators
shell_data = wr.load_pickle('dme/dme_ionization_ff.pkl')
for _shell, _sd in shell_data.items():
_sd['log10ffsquared_itp'] = RegularGridInterpolator(
(_sd['lnks'], _sd['lnqs']),
np.log10(_sd['ffsquared']),
bounds_error=False, fill_value=-float('inf'),)
dme_shells = [(5, 1), (5, 0), (4, 2), (4, 1), (4, 0)]
l_to_number = dict(s=0, p=1, d=2, f=3)
l_to_letter = {v: k for k, v in l_to_number.items()}
@export
def shell_str(n, l):
if isinstance(l, str):
return str(n) + l
return str(n) + l_to_letter[l]
@export
def dme_ionization_ff(shell, e_er, q):
"""Return dark matter electron scattering ionization form factor
Outside the parametrized range, the form factor is assumed 0
to give conservative results.
:param shell: Name of atomic shell, e.g. '4p'
Note not all shells are included in the data.
:param e_er: Electronic recoil energy
:param q: Momentum transfer
"""
if isinstance(shell, tuple):
shell = shell_str(*shell)
lnq = np.log(q / (nu.me * nu.c0 * nu.alphaFS))
# From Mathematica: (*ER*) (2 lnkvalues[[j]])/Log[10]
# log10 (E/Ry) = 2 lnk / ln10
# lnk = log10(E/Ry) * ln10 / 2
# = lng(E/Ry) / 2
# Ry = rydberg = 13.6 eV
ry = nu.me * nu.e ** 4 / (8 * nu.eps0 ** 2 * nu.hPlanck ** 2)
lnk = np.log(e_er / ry) / 2
return 10**(shell_data[shell]['log10ffsquared_itp'](
np.vstack([lnk, lnq]).T))
@export
def binding_es_for_dme(n, l):
"""Return binding energy of Xenon's (n, l) orbital
according to Essig et al. 2017 Table II
Note these are different from e.g. Ibe et al. 2017!
"""
return {'4s': 213.8,
'4p': 163.5,
'4d': 75.6,
'5s': 25.7,
'5p': 12.4}[shell_str(n, l)] * nu.eV
@export
def v_min_dme(eb, erec, q, mw):
"""Minimal DM velocity for DM-electron scattering
:param eb: binding energy of shell
:param erec: electronic recoil energy energy
:param q: momentum transfer
:param mw: DM mass
"""
return (erec + eb) / q + q / (2 * mw)
# Precompute velocity integrals for t=None
@export
def velocity_integral_without_time(halo_model=None):
halo_model = wr.StandardHaloModel() if halo_model is None else halo_model
_v_mins = np.linspace(0, 1, 1000) * wr.v_max(None, halo_model.v_esc)
_ims = np.array([
quad(lambda v: 1 / v * halo_model.velocity_dist(v,None),
_v_min,
wr.v_max(None, halo_model.v_esc ))[0]
for _v_min in _v_mins])
# Store interpolator in km/s rather than unit-dependent numbers
# so we don't have to recalculate them when nu.reset_units() is called
inverse_mean_speed_kms = interp1d(
_v_mins / (nu.km/nu.s),
_ims * (nu.km/nu.s),
# If we don't have 0 < v_min < v_max, we want to return 0
# so the integrand vanishes
fill_value=0, bounds_error=False)
return inverse_mean_speed_kms
inverse_mean_speed_kms = velocity_integral_without_time()
@export
@wr.vectorize_first
def rate_dme(erec, n, l, mw, sigma_dme,
f_dm='1',
t=None, halo_model = None, **kwargs):
"""Return differential rate of dark matter electron scattering vs energy
(i.e. dr/dE, not dr/dlogE)
:param erec: Electronic recoil energy
:param n: Principal quantum numbers of the shell that is hit
:param l: Angular momentum quantum number of the shell that is hit
:param mw: DM mass
:param sigma_dme: DM-free electron scattering cross-section at fixed
momentum transfer q=0
:param f_dm: One of the following:
'1': |F_DM|^2 = 1, contact interaction / heavy mediator (default)
'1_q': |F_DM|^2 = (\alpha m_e c / q), dipole moment
'1_q2': |F_DM|^2 = (\alpha m_e c / q)^2, ultralight mediator
:param t: A J2000.0 timestamp.
If not given, a conservative velocity distribution is used.
:param halo_model: class (default to standard halo model) containing velocity distribution
"""
halo_model = wr.StandardHaloModel() if halo_model is None else halo_model
shell = shell_str(n, l)
eb = binding_es_for_dme(n, l)
f_dm = {
'1': lambda q: 1,
'1_q': lambda q: nu.alphaFS * nu.me * nu.c0 / q,
'1_q2': lambda q: (nu.alphaFS * nu.me * nu.c0 / q)**2
}[f_dm]
# No bounds are given for the q integral
# but the form factors are only specified in a limited range of q
qmax = (np.exp(shell_data[shell]['lnqs'].max())
* (nu.me * nu.c0 * nu.alphaFS))
if t is None:
# Use precomputed inverse mean speed,
# so we only have to do a single integral
def diff_xsec(q):
vmin = v_min_dme(eb, erec, q, mw)
result = q * dme_ionization_ff(shell, erec, q) * f_dm(q)**2
# Note the interpolator is in kms, not unit-carrying numbers
# see above
result *= inverse_mean_speed_kms(vmin / (nu.km/nu.s))
result /= (nu.km/nu.s)
return result
r = quad(diff_xsec, 0, qmax)[0]
else:
# Have to do double integral
# Note dblquad expects the function to be f(y, x), not f(x, y)...
def diff_xsec(v, q):
result = q * dme_ionization_ff(shell, erec, q) * f_dm(q)**2
result *= 1 / v * halo_model.velocity_dist(v, t)
return result
r = dblquad(
diff_xsec,
0,
qmax,
lambda q: v_min_dme(eb, erec, q, mw),
lambda _: wr.v_max(t, halo_model.v_esc),
**kwargs)[0]
mu_e = mw * nu.me / (mw + nu.me)
return (
# Convert cross-section to rate, as usual
halo_model.rho_dm / mw * (1 / wr.mn())
# d/lnE -> d/E
* 1 / erec
# Prefactors in cross-section
* sigma_dme / (8 * mu_e ** 2)
* r)
``` |
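All arguments of `rate_dme` are unit-carrying numbers built with `numericalunits`; a hedged usage sketch (the mass, energy and cross-section values below are purely illustrative) could be:
```python
import numericalunits as nu
import wimprates as wr

erec = 10 * nu.eV                       # electronic recoil energy
mw = 100 * nu.MeV / nu.c0**2            # DM mass
sigma = 1e-40 * nu.cm**2                # DM-free electron cross-section at q=0

# Differential rate dr/dE for the 5p shell with a heavy-mediator form factor
rate = wr.rate_dme(erec, 5, 1, mw, sigma, f_dm='1')

# Assuming the usual wimprates unit convention, multiplying by (kg day keV)
# converts the result to events per kg per day per keV.
print(rate * nu.kg * nu.day * nu.keV)
```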
{
"source": "joranbeasley/Getting-Started-with-Modern-Python",
"score": 4
} |
#### File: Getting-Started-with-Modern-Python/Section4/generators_itertools.py
```python
import itertools
import random
for k in itertools.count():
if all(random.randint(1,6)==2 for _ in range(3)):
break
print(f"Took {k} tries to get 3 2's")
# for i in itertools.count(1):
# s = "Fizz" if not i%3 else ""
# s += "Buzz" if not i%5 else ""
# s = s if s else str(i)
# print(s)
# for row_class in itertools.cycle(["even","odd","nothing"]):
# print(f"I am {row_class}")
# print(f'<div class="{row_class}">')
# print(list(itertools.combinations(['a','b','c','d'],2)))
# print(list(itertools.permutations("abcd",2)))
# print(list(itertools.product("abc","123")))
# print(list(itertools.product([0,1],repeat=4)))
def my_counter():
my_number = 1
while True:
yield my_number
my_number = my_number + 1
for i in my_counter():
print(i)
```
#### File: Getting-Started-with-Modern-Python/Section5/blackjack_game.py
```python
import random
from Section5.blackjack import Card
from Section5.blackjack import Hand
from Section5.functions01 import get_valid_input
class BlackjackGame(object):
dealer = None
player = None
deck = None
def shuffle(self):
self.deck = [Card( suit,val) for val in range(13) for suit in range(4)]
random.shuffle(self.deck)
def deal(self):
self.shuffle()
self.player = Hand().add_card(self.deck.pop(0), self.deck.pop(0), )
self.dealer = Hand().add_card(self.deck.pop(0), self.deck.pop(0), )
def ask_player_hit_or_stay(self):
input_validator = lambda inp: inp.lower()[0] in "hs"
prompt = "[H]it or [S]tand?"
error_msg = "Please enter [H]it or [S]tand"
return get_valid_input(prompt, error_msg, input_validator)
def play(self):
self.deal()
print("Dealer Shows:", self.dealer._cards[0].toString(False))
print("You Have: ", self.player.toString())
action = self.ask_player_hit_or_stay()
while action.lower().startswith("h"):
try:
self.player.add_card(self.deck.pop(0))
except Exception:
print("You Draw:", self.player._cards[-1].toString(False))
print("PLAYER BUSTS!!!")
break  # stop drawing once the player has busted
else:
print("You Draw:", self.player._cards[-1].toString(False))
print("You Have: ", self.player.toString())
print("Score:", self.player.score)
action = self.ask_player_hit_or_stay()
while self.dealer.score < 17:
try:
self.dealer.add_card(self.deck.pop(0))
except Exception:
print("DEALER BUSTS!!!!")
print("DEALER:",self.dealer.toString())
print("PLAYER:",self.player.toString())
def MainLoop(self):
self.play()
while input("play again?").lower().startswith("y"):
self.play()
if __name__ == "__main__":
BlackjackGame().MainLoop()
```
#### File: Section5/blackjack/hand.py
```python
from typing import List
from Section5.blackjack import Card
class Hand(object):
def __init__(self):
self._cards: List[Card] = []
self._total = None
@property
def score(self):
return self._total
def add_card(self,*cards: Card):
for card in cards:
self._cards.append(card)
if self.recalculate_hand_score() > 21:
raise Exception("BUST!!!!")
return self
def recalculate_hand_score(self):
total = 0
aces = 0
for card in self._cards:
total += card.get_points()
aces += int(card._val == 0) # int(True) is 1
while aces > 0 and total > 21:
aces -= 1
total -= 10
self._total = total
return total
def toString(self) -> str:
cards = ("[ {card} ]".format(card=c.toString()) for c in self._cards)
return " ".join(cards)
``` |
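The ace handling in `recalculate_hand_score` starts every ace at 11 points and demotes them to 1 one at a time while the total is over 21. The same logic, restated standalone on plain point values (a sketch only, not the package API):
```python
def best_total(points, n_aces):
    # Aces are counted as 11 in `points`; drop 10 per ace while busting.
    total = sum(points)
    while n_aces > 0 and total > 21:
        n_aces -= 1
        total -= 10
    return total

# Ace + 9 + Ace: 11 + 9 + 11 = 31 -> demote one ace -> 21
print(best_total([11, 9, 11], n_aces=2))   # 21
```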
{
"source": "joranbeasley/Raccoon",
"score": 2
} |
#### File: raccoon_src/utils/help_utils.py
```python
import os
import distutils.spawn
from collections import Counter
from subprocess import PIPE, check_call, CalledProcessError
from requests.exceptions import ConnectionError
from raccoon_src.utils.exceptions import RaccoonException, ScannerException, RequestHandlerException
from raccoon_src.utils.request_handler import RequestHandler
class HelpUtilities:
PATH = ""
@classmethod
def validate_target_is_up(cls, host):
cmd = "ping -c 1 {}".format(host.target)
try:
check_call(cmd.split(), stdout=PIPE, stderr=PIPE)
return
except CalledProcessError:
# Maybe ICMP is blocked. Try web server
try:
if host.port == 443 or host.port == 80:
url = "{}://{}".format(host.protocol, host.target)
else:
url = "{}://{}:{}".format(host.protocol, host.target, host.port)
rh = RequestHandler()
rh.send("GET", url=url, timeout=15)
return
except (ConnectionError, RequestHandlerException):
raise RaccoonException("Target {} seems to be down (no response to ping or from a web server"
" at port {}).\nRun with --skip-health-check to ignore hosts"
" considered as down.".format(host, host.port))
@classmethod
def validate_wordlist_args(cls, proxy_list, wordlist, subdomain_list):
if proxy_list and not os.path.isfile(proxy_list):
raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
if wordlist and not os.path.isfile(wordlist):
raise FileNotFoundError("Not a valid file path, {}".format(wordlist))
if subdomain_list and not os.path.isfile(subdomain_list):
raise FileNotFoundError("Not a valid file path, {}".format(subdomain_list))
@classmethod
def validate_port_range(cls, port_range):
"""Validate port range for Nmap scan"""
ports = port_range.split("-")
if all(ports) and int(ports[-1]) <= 65535 and len(ports) == 2:
return True
raise ScannerException("Invalid port range {}".format(port_range))
@classmethod
def validate_proxy_args(cls, *args):
"""No more than 1 of the following can be specified: tor_routing, proxy, proxy_list"""
supplied_proxies = Counter((not arg for arg in (*args,))).get(False)
if not supplied_proxies:
return
elif supplied_proxies > 1:
raise RaccoonException("Must specify only one of the following:\n"
"--tor-routing, --proxy-list, --proxy")
@classmethod
def determine_verbosity(cls, quiet):
if quiet:
return "CRITICAL"
else:
return "INFO"
@classmethod
def find_nmap_executable(cls):
return distutils.spawn.find_executable("nmap")
@classmethod
def find_openssl_executable(cls):
return distutils.spawn.find_executable("openssl")
@classmethod
def validate_executables(cls):
if not (cls.find_nmap_executable() and cls.find_openssl_executable()):
raise RaccoonException("Could not find Nmap or OpenSSL "
"installed. Please install them and run Raccoon again.")
return
@classmethod
def create_output_directory(cls, outdir):
"""Tries to create base output directory"""
cls.PATH = outdir
try:
os.mkdir(outdir)
except FileExistsError:
pass
@classmethod
def get_output_path(cls, module_path):
return "{}/{}".format(cls.PATH, module_path)
@classmethod
def confirm_traffic_routs_through_tor(cls):
rh = RequestHandler()
try:
page = rh.send("GET", url="https://check.torproject.org")
if "Congratulations. This browser is configured to use Tor." in page.text:
return
elif "Sorry. You are not using Tor" in page.text:
raise RaccoonException("Traffic does not seem to be routed through Tor.\nExiting")
except RequestHandlerException:
raise RaccoonException("Tor service seems to be down - not able to connect to 127.0.0.1:9050.\nExiting")
@classmethod
def query_dns_dumpster(cls, host):
# Start DNS Dumpster session for the token
request_handler = RequestHandler()
dnsdumpster_session = request_handler.get_new_session()
url = "https://dnsdumpster.com"
if host.naked:
target = host.naked
else:
target = host.target
payload = {
"targetip": target,
"csrfmiddlewaretoken": None
}
try:
dnsdumpster_session.get(url, timeout=10)
jar = dnsdumpster_session.cookies
for c in jar:
if not c.__dict__.get("name") == "csrftoken":
continue
payload["csrfmiddlewaretoken"] = c.__dict__.get("value")
break
return dnsdumpster_session.post(url, data=payload, headers={"Referer": "https://dnsdumpster.com/"})
except ConnectionError:
raise RaccoonException
@classmethod
def extract_hosts_from_cidr(cls):
pass
@classmethod
def extract_hosts_from_range(cls):
pass
``` |
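`validate_port_range` only accepts a `start-end` string whose upper bound fits in 16 bits; a hedged usage sketch (imports assume the module layout shown above):
```python
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.utils.exceptions import ScannerException

HelpUtilities.validate_port_range("1-1024")      # returns True

try:
    HelpUtilities.validate_port_range("80")       # a single port is not a range
except ScannerException as err:
    print(err)
```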
{
"source": "JoranHonig/EthereumAnalysis",
"score": 2
} |
#### File: EthereumAnalysis/ContractNotifier/Notifier.py
```python
class Notifier:
def __init__(self):
self.callback = lambda x, y, z : x
def encounter(self, address, source=None):
self.callback(address, self, source)
def scan(self):
pass
``` |
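`encounter` simply forwards the address, the notifier itself and the optional source to whatever callback has been set. A hedged usage sketch (the import path follows the file location above; the address is a placeholder):
```python
from ContractNotifier.Notifier import Notifier


def log_contract(address, notifier, source):
    print(f"contract encountered at {address} (source: {source})")


n = Notifier()
n.callback = log_contract
n.encounter("0x0000000000000000000000000000000000000000", source="trace")
```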
{
"source": "JoranHonig/mythx-models",
"score": 2
} |
#### File: tests/strategies/group.py
```python
from hypothesis.strategies import (
composite,
datetimes,
integers,
lists,
sampled_from,
text,
uuids,
)
@composite
def group_data(draw):
return {
"completedAt": str(draw(datetimes())),
"createdAt": str(draw(datetimes())),
"createdBy": str(draw(text())),
"id": str(draw(uuids())),
"mainSourceFiles": draw(lists(text(), max_size=3)),
"name": draw(text()),
"numAnalyses": {
"total": draw(integers()),
"queued": draw(integers()),
"running": draw(integers()),
"failed": draw(integers()),
"finished": draw(integers()),
},
"numVulnerabilities": {
"high": draw(integers()),
"medium": draw(integers()),
"low": draw(integers()),
"none": draw(integers()),
},
"progress": draw(integers()),
"projectId": str(draw(uuids())),
"status": draw(sampled_from(["opened", "sealed"])),
}
@composite
def group_status_response(draw):
return draw(group_data())
@composite
def group_status_request(draw):
return {"groupId": str(draw(uuids()))}
@composite
def group_operation_response(draw):
return draw(group_data())
@composite
def group_operation_request(draw):
return {
"groupId": str(draw(uuids())),
"type": draw(
sampled_from(["seal_group", "add_to_project", "remove_from_project"])
),
}
@composite
def group_list_response(draw):
groups = draw(lists(group_data(), max_size=3))
return {"groups": groups, "total": len(groups)}
@composite
def group_list_request(draw):
return {
"offset": draw(integers()),
"createdBy": draw(text()),
"groupName": draw(text()),
"dateFrom": draw(datetimes()),
"dateTo": draw(datetimes()),
}
@composite
def group_creation_response(draw):
return draw(group_data())
@composite
def group_creation_request(draw):
return {"groupName": draw(text())}
``` |
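These `@composite` strategies are meant to be drawn inside `hypothesis` tests. A hedged sketch of such a test (the import path is assumed to follow the file location above):
```python
from hypothesis import given

from tests.strategies.group import group_status_response  # assumed import path


@given(group_status_response())
def test_group_status_shape(data):
    # Field names follow the dictionaries built by group_data() above.
    assert set(data["numAnalyses"]) == {"total", "queued", "running",
                                        "failed", "finished"}
    assert data["status"] in ("opened", "sealed")
```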
{
"source": "JoranHonig/pythx",
"score": 3
} |
#### File: pythx/middleware/group_data.py
```python
import logging
from typing import Dict
from pythx.types import RESPONSE_MODELS, REQUEST_MODELS
from pythx.middleware.base import BaseMiddleware
LOGGER = logging.getLogger("GroupDataMiddleware")
class GroupDataMiddleware(BaseMiddleware):
"""This middleware fills the :code:`groupId` and :code:`groupName` fields
when submitting a new analysis job.
This means that only :code:`process_request` carries business logic, while
:code:`process_response` returns the input response object right away without touching it.
"""
def __init__(self, group_id: str = None, group_name: str = None):
LOGGER.debug("Initializing")
self.group_id = group_id
self.group_name = group_name
def process_request(self, req: REQUEST_MODELS) -> Dict:
"""Add the :code:`groupId` and/or :code:`groupName` field if the
request we are making is the submission of a new analysis job.
Because we execute the middleware on the request data dictionary, we cannot simply
match the domain model type here. However, based on the endpoint and the request
method we can determine that a new job has been submitted. In any other case, we
return the request right away without touching it.
:param req: The request's data dictionary
:return: The request's data dictionary, optionally with the group data field(s) filled in
"""
if not (req["method"] == "POST" and req["url"].endswith("/analyses")):
return req
if self.group_id:
LOGGER.debug("Adding group ID %s to request", self.group_id)
req["payload"]["groupId"] = self.group_id
if self.group_name:
LOGGER.debug("Adding group name %s to request", self.group_name)
req["payload"]["groupName"] = self.group_name
return req
def process_response(self, resp: RESPONSE_MODELS) -> RESPONSE_MODELS:
"""This method is irrelevant for adding our group data, so we don't do
anything here.
We still have to define it, though. Otherwise when calling the abstract base class'
:code:`process_response` method, we will encounter an exception.
:param resp: The response domain model
:return: The very same response domain model
"""
LOGGER.debug("Forwarding the response without any action")
return resp
```
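The middleware is meant to be handed to the API handler, which then runs `process_request` on every outgoing request dictionary. A hedged sketch (the group id and name are placeholders; `APIHandler(middlewares=...)` is used the same way in the test suite below):
```python
from pythx.api.handler import APIHandler
from pythx.middleware.group_data import GroupDataMiddleware

handler = APIHandler(middlewares=[
    GroupDataMiddleware(group_id="test-group-id", group_name="nightly-ci")
])
```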
#### File: pythx/tests/common.py
```python
import json
from pathlib import Path
from mythx_models.response import DetectedIssuesResponse
def get_test_case(path: str, obj=None):
with open(str(Path(__file__).parent / path)) as f:
dict_data = json.load(f)
if obj is None:
return dict_data
if obj is DetectedIssuesResponse and type(dict_data) is list:
return obj(issue_reports=dict_data)
else:
return obj(**dict_data)
def generate_request_dict(req):
return {
"method": req.method,
"payload": req.payload,
"params": req.parameters,
"headers": req.headers,
"url": "https://test.com/" + req.endpoint,
}
```
#### File: pythx/tests/test_api_handler.py
```python
import pytest
from mythx_models import response as respmodels
from mythx_models.exceptions import MythXAPIError
from mythx_models.request import (
AnalysisListRequest,
AnalysisStatusRequest,
AnalysisSubmissionRequest,
AuthLoginRequest,
AuthLogoutRequest,
AuthRefreshRequest,
DetectedIssuesRequest,
)
from pythx.api.handler import DEFAULT_API_URL, APIHandler
from pythx.middleware.base import BaseMiddleware
from .common import get_test_case
class TestMiddleware(BaseMiddleware):
def process_request(self, req):
req["test"] = "test"
return req
def process_response(self, resp):
return resp
API_HANDLER = APIHandler(middlewares=[TestMiddleware()])
def assert_request_dict_keys(d):
assert d.get("method") is not None
assert d.get("payload") is not None
assert d.get("headers") is not None
assert d.get("url") is not None
assert d.get("params") is not None
def assert_request_dict_content(d, request_obj):
assert d["method"] == request_obj.method
assert d["payload"] == request_obj.payload
assert d["headers"] == request_obj.headers
assert d["params"] == request_obj.parameters
assert request_obj.endpoint in d["url"]
# check middleware request processing
assert d["test"] == "test"
@pytest.mark.parametrize(
"request_obj",
[
get_test_case("testdata/analysis-list-request.json", AnalysisListRequest),
get_test_case("testdata/detected-issues-request.json", DetectedIssuesRequest),
get_test_case("testdata/analysis-status-request.json", AnalysisStatusRequest),
get_test_case(
"testdata/analysis-submission-request.json", AnalysisSubmissionRequest
),
get_test_case("testdata/auth-login-request.json", AuthLoginRequest),
get_test_case("testdata/auth-logout-request.json", AuthLogoutRequest),
get_test_case("testdata/auth-refresh-request.json", AuthRefreshRequest),
],
)
def test_request_dicts(request_obj):
req_dict = API_HANDLER.assemble_request(request_obj)
assert_request_dict_keys(req_dict)
assert_request_dict_content(req_dict, request_obj)
assert req_dict["url"].startswith(DEFAULT_API_URL)
def test_middleware_default_empty():
assert APIHandler().middlewares == []
def assert_analysis(analysis, data):
assert analysis.api_version == data["apiVersion"]
assert analysis.maru_version == data["maruVersion"]
assert analysis.mythril_version == data["mythrilVersion"]
assert analysis.run_time == data["runTime"]
assert analysis.queue_time == data["queueTime"]
assert analysis.status.title() == data["status"]
assert analysis.submitted_at == data["submittedAt"]
assert analysis.submitted_by == data["submittedBy"]
assert analysis.uuid == data["uuid"]
def test_parse_analysis_list_response():
test_dict = get_test_case("testdata/analysis-list-response.json")
model = API_HANDLER.parse_response(
test_dict, respmodels.AnalysisListResponse
)
for i, analysis in enumerate(model.analyses):
response_obj = test_dict["analyses"][i]
assert_analysis(analysis, response_obj)
def test_parse_analysis_status_response():
test_dict = get_test_case("testdata/analysis-status-response.json")
model = API_HANDLER.parse_response(
test_dict, respmodels.AnalysisStatusResponse
)
assert_analysis(model, test_dict)
def test_parse_analysis_submission_response():
test_dict = get_test_case("testdata/analysis-status-response.json")
model = API_HANDLER.parse_response(
test_dict, respmodels.AnalysisSubmissionResponse
)
assert model.api_version == test_dict["apiVersion"]
assert model.maru_version == test_dict["maruVersion"]
assert model.mythril_version == test_dict["mythrilVersion"]
assert model.harvey_version == test_dict["harveyVersion"]
assert model.queue_time == test_dict["queueTime"]
assert model.status.title() == test_dict["status"]
assert model.submitted_at == test_dict["submittedAt"]
assert model.submitted_by == test_dict["submittedBy"]
assert model.uuid == test_dict["uuid"]
def test_parse_detected_issues_response():
test_dict = get_test_case("testdata/detected-issues-response.json")
expected_report = test_dict[0]
model = API_HANDLER.parse_response(
test_dict, respmodels.DetectedIssuesResponse
)
issue = model.issue_reports[0].issues[0].dict(by_alias=True)
issue["decodedLocations"] = [list(t) for t in issue["decodedLocations"]]
assert issue == expected_report["issues"][0]
assert model.issue_reports[0].source_type == expected_report["sourceType"]
assert model.issue_reports[0].source_format == expected_report["sourceFormat"]
assert model.issue_reports[0].source_list == expected_report["sourceList"]
assert model.issue_reports[0].meta_data == expected_report["meta"]
def test_parse_login_response():
test_dict = get_test_case("testdata/auth-login-response.json")
model = API_HANDLER.parse_response(
test_dict, respmodels.AuthLoginResponse
)
assert model.access_token == test_dict["jwtTokens"]["access"]
assert model.refresh_token == test_dict["jwtTokens"]["refresh"]
def test_parse_refresh_response():
test_dict = get_test_case("testdata/auth-refresh-response.json")
model = API_HANDLER.parse_response(
test_dict, respmodels.AuthRefreshResponse
)
assert model.access_token == test_dict["jwtTokens"]["access"]
assert model.refresh_token == test_dict["jwtTokens"]["refresh"]
def test_parse_logout_response():
test_dict = get_test_case("testdata/auth-logout-response.json")
model = API_HANDLER.parse_response(
test_dict, respmodels.AuthLogoutResponse
)
assert model.dict() == {}
assert model.json() == "{}"
def test_send_request_successful(requests_mock):
test_url = "mock://test.com/path"
requests_mock.get(test_url, text='{"resp":"test"}')
resp = APIHandler.send_request(
{"method": "GET", "headers": {}, "url": test_url, "payload": {}, "params": {}},
auth_header={"Authorization": "Bearer foo"},
)
assert resp == {"resp": "test"}
assert requests_mock.called == 1
h = requests_mock.request_history[0]
assert h.method == "GET"
assert h.url == test_url
assert h.headers["Authorization"] == "Bearer foo"
def test_send_request_failure(requests_mock):
test_url = "mock://test.com/path"
requests_mock.get(test_url, text='{"resp":"test"}', status_code=400)
with pytest.raises(MythXAPIError):
APIHandler.send_request(
{
"method": "GET",
"headers": {},
"url": test_url,
"payload": {},
"params": {},
},
auth_header={"Authorization": "Bearer foo"},
)
assert requests_mock.called == 1
h = requests_mock.request_history[0]
assert h.method == "GET"
assert h.url == test_url
assert h.headers["Authorization"] == "Bearer foo"
def test_send_request_unauthenticated(requests_mock):
test_url = "mock://test.com/path"
requests_mock.get("mock://test.com/path", text='{"resp":"test"}', status_code=400)
with pytest.raises(MythXAPIError):
APIHandler.send_request(
{
"method": "GET",
"headers": {},
"url": test_url,
"payload": {},
"params": {},
}
)
assert requests_mock.called == 1
h = requests_mock.request_history[0]
assert h.method == "GET"
assert h.url == test_url
assert h.headers.get("Authorization") is None
```
#### File: pythx/tests/test_property_checking_middleware.py
```python
import pytest
from mythx_models.request import (
AnalysisListRequest,
AnalysisStatusRequest,
AnalysisSubmissionRequest,
AuthLoginRequest,
AuthLogoutRequest,
AuthRefreshRequest,
DetectedIssuesRequest,
)
from mythx_models.response import (
AnalysisListResponse,
AnalysisStatusResponse,
AnalysisSubmissionResponse,
AuthLoginResponse,
AuthLogoutResponse,
AuthRefreshResponse,
DetectedIssuesResponse,
)
from pythx.middleware.property_checking import PropertyCheckingMiddleware
from .common import generate_request_dict, get_test_case
DEFAULT_PC_MIDDLEWARE = PropertyCheckingMiddleware()
CUSTOM_PC_MIDDLEWARE = PropertyCheckingMiddleware(property_checking=True)
@pytest.mark.parametrize(
"middleware,request_dict,field_added",
[
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/analysis-list-request.json", AnalysisListRequest
)
),
False,
),
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/detected-issues-request.json", DetectedIssuesRequest
)
),
False,
),
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/analysis-status-request.json", AnalysisStatusRequest
)
),
False,
),
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/analysis-submission-request.json",
AnalysisSubmissionRequest,
)
),
True,
),
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case("testdata/auth-login-request.json", AuthLoginRequest)
),
False,
),
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case("testdata/auth-logout-request.json", AuthLogoutRequest)
),
False,
),
(
DEFAULT_PC_MIDDLEWARE,
generate_request_dict(
get_test_case("testdata/auth-refresh-request.json", AuthRefreshRequest)
),
False,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/analysis-list-request.json", AnalysisListRequest
)
),
False,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/detected-issues-request.json", DetectedIssuesRequest
)
),
False,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/analysis-status-request.json", AnalysisStatusRequest
)
),
False,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case(
"testdata/analysis-submission-request.json",
AnalysisSubmissionRequest,
)
),
True,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case("testdata/auth-login-request.json", AuthLoginRequest)
),
False,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case("testdata/auth-logout-request.json", AuthLogoutRequest)
),
False,
),
(
CUSTOM_PC_MIDDLEWARE,
generate_request_dict(
get_test_case("testdata/auth-refresh-request.json", AuthRefreshRequest)
),
False,
),
],
)
def test_request_dicts(middleware, request_dict, field_added):
new_request = middleware.process_request(request_dict)
if field_added:
assert (
new_request["payload"].get("propertyChecking")
== middleware.property_checking
)
del new_request["payload"]["propertyChecking"]
# rest of the result should stay the same
assert request_dict == new_request
@pytest.mark.parametrize(
"middleware,resp_obj",
[
(
DEFAULT_PC_MIDDLEWARE,
get_test_case("testdata/analysis-list-response.json", AnalysisListResponse),
),
(
DEFAULT_PC_MIDDLEWARE,
get_test_case(
"testdata/detected-issues-response.json", DetectedIssuesResponse
),
),
(
DEFAULT_PC_MIDDLEWARE,
get_test_case(
"testdata/analysis-status-response.json", AnalysisStatusResponse
),
),
(
DEFAULT_PC_MIDDLEWARE,
get_test_case(
"testdata/analysis-submission-response.json", AnalysisSubmissionResponse
),
),
(
DEFAULT_PC_MIDDLEWARE,
get_test_case("testdata/auth-login-response.json", AuthLoginResponse),
),
(
DEFAULT_PC_MIDDLEWARE,
get_test_case("testdata/auth-logout-response.json", AuthLogoutResponse),
),
(
DEFAULT_PC_MIDDLEWARE,
get_test_case("testdata/auth-refresh-response.json", AuthRefreshResponse),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case("testdata/analysis-list-response.json", AnalysisListResponse),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case(
"testdata/detected-issues-response.json", DetectedIssuesResponse
),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case(
"testdata/analysis-status-response.json", AnalysisStatusResponse
),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case(
"testdata/analysis-submission-response.json", AnalysisSubmissionResponse
),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case("testdata/auth-login-response.json", AuthLoginResponse),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case("testdata/auth-logout-response.json", AuthLogoutResponse),
),
(
CUSTOM_PC_MIDDLEWARE,
get_test_case("testdata/auth-refresh-response.json", AuthRefreshResponse),
),
],
)
def test_response_models(middleware, resp_obj):
new_resp_obj = middleware.process_response(resp_obj)
assert new_resp_obj == resp_obj
``` |
{
"source": "JoranSlingerland/StockTracker",
"score": 3
} |
#### File: StockTracker/add_data_to_stocks_held/__init__.py
```python
import logging
from datetime import datetime, timedelta
def main(payload: str) -> str:
"""add data to stocks held"""
logging.info("Adding stock data to stocks held")
# get data
stocks_held = payload[0]
stock_data = payload[1]
forex_data = payload[2]
stock_meta_data = payload[3]
output_list = []
# initialize variables
for stock in stocks_held["stocks_held"]:
days_to_substract = 0
while True:
try:
date_string = f"{stock['date']} 00:00:00"
date_object = datetime.fromisoformat(date_string)
date_object = date_object - timedelta(days=days_to_substract)
date_object = date_object.strftime("%Y-%m-%d")
stock_open = float(
stock_data[stock["symbol"]]["Time Series (Daily)"][date_object][
"1. open"
]
)
stock_high = float(
stock_data[stock["symbol"]]["Time Series (Daily)"][date_object][
"2. high"
]
)
stock_low = float(
stock_data[stock["symbol"]]["Time Series (Daily)"][date_object][
"3. low"
]
)
stock_close = float(
stock_data[stock["symbol"]]["Time Series (Daily)"][date_object][
"4. close"
]
)
stock_volume = float(
stock_data[stock["symbol"]]["Time Series (Daily)"][date_object][
"5. volume"
]
)
forex_high = float(
forex_data[stock["currency"]]["Time Series FX (Daily)"][
date_object
]["2. high"]
)
stock.update(
{
"open_value": stock_open * forex_high,
"high_value": stock_high * forex_high,
"low_value": stock_low * forex_high,
"close_value": stock_close * forex_high,
"volume": stock_volume,
"total_value": stock_close * forex_high * stock["quantity"],
"total_pl": (stock_close * forex_high * stock["quantity"])
- (stock["average_cost"] * stock["quantity"]),
"total_pl_percentage": (
(stock_close * forex_high * stock["quantity"])
- (stock["average_cost"] * stock["quantity"])
)
/ (stock_close * forex_high * stock["quantity"]),
"name": stock_meta_data[f"{stock['symbol']}"]["name"],
"description": stock_meta_data[f"{stock['symbol']}"][
"description"
],
"country": stock_meta_data[f"{stock['symbol']}"]["country"],
"sector": stock_meta_data[f"{stock['symbol']}"]["sector"],
"domain": stock_meta_data[f"{stock['symbol']}"]["domain"],
"logo": stock_meta_data[f"{stock['symbol']}"]["logo"],
}
)
break
except KeyError:
days_to_substract += 1
logging.debug(
f'KeyError for {stock["symbol"]} on {date_object}. Attempting to subtract {days_to_substract} day(s)'
)
output_list.append(stock)
return {"stocks_held": output_list}
```
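For reference, a minimal sketch of the payload shape this activity expects. All of the field names below are taken from the lookups in the function above, but the concrete values are hypothetical and only meant to illustrate the structure:

```python
# Hypothetical example payload (values invented for illustration only).
example_payload = [
    {"stocks_held": [{
        "date": "2022-03-01", "symbol": "MSFT", "currency": "USD",
        "quantity": 2, "average_cost": 250.0,
    }]},
    {"MSFT": {"Time Series (Daily)": {"2022-03-01": {
        "1. open": "300.0", "2. high": "305.0", "3. low": "295.0",
        "4. close": "302.0", "5. volume": "1000",
    }}}},
    {"USD": {"Time Series FX (Daily)": {"2022-03-01": {"2. high": "0.9"}}}},
    {"MSFT": {"name": "Microsoft", "description": "...", "country": "US",
              "sector": "Tech", "domain": "microsoft.com", "logo": "..."}},
]
# main(example_payload) would return {"stocks_held": [...]} with open/high/low/close
# converted to EUR through the forex rate and profit/loss fields filled in.
```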
#### File: StockTracker/create_cosmosdb_db_and_container/__init__.py
```python
import logging
import azure.functions as func
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey
from shared_code import get_config
def main(req: func.HttpRequest) -> func.HttpResponse:
"""Main function"""
logging.info("Creating sql tables")
# get config
containers = (get_config.get_containers())["containers"]
cosmosdb_config = get_config.get_cosmosdb()
client = cosmos_client.CosmosClient(
cosmosdb_config["endpoint"], cosmosdb_config["key"]
)
# create database
database = client.create_database_if_not_exists(
id=cosmosdb_config["database"],
offer_throughput=cosmosdb_config["offer_throughput"],
)
# create container
for container in containers:
database.create_container_if_not_exists(
id=container["container_name"],
partition_key=PartitionKey(path=container["partition_key"]),
)
return func.HttpResponse(
body='{"result": "done"}',
mimetype="application/json",
status_code=200,
)
```
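The shape returned by `get_config.get_containers()` and `get_config.get_cosmosdb()` is not part of this listing; judging from how the values are consumed here and in the delete functions below, it is presumably something like the following (all values are hypothetical):

```python
# Hypothetical illustration of the expected config shapes (not the real config).
example_containers_config = {
    "containers": [
        {
            "container_name": "stocks_held",
            "partition_key": "/id",
            "candelete": True,         # consumed by delete_cosmosdb_container
            "output_container": True,  # consumed by delete_cosmosdb_items
        },
    ]
}
example_cosmosdb_config = {
    "endpoint": "https://<account>.documents.azure.com:443/",
    "key": "<primary-key>",
    "database": "stocktracker",
    "offer_throughput": 400,
}
```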
#### File: StockTracker/delete_cosmosdb_container/__init__.py
```python
import logging
import azure.functions as func
import azure.cosmos.exceptions as exceptions
from shared_code import get_config, cosmosdb_module
def main(req: func.HttpRequest) -> func.HttpResponse:
"""Main function"""
logging.info("Creating sql tables")
# get config
containers_to_delete = req.route_params.get("containers_to_delete")
containers = (get_config.get_containers())["containers"]
if not containers_to_delete:
logging.error("No containers_to_delete provided")
return func.HttpResponse(
body='{"result": "Please pass a name on the query string or in the request body"}',
mimetype="application/json",
status_code=400,
)
# get database
database = cosmosdb_module.cosmosdb_database()
# delete containers
if containers_to_delete == "all":
for container in containers:
try:
database.delete_container(container["container_name"])
except exceptions.CosmosResourceNotFoundError:
logging.info(f"Container {container['container_name']} does not exist")
elif containers_to_delete == "output_only":
for container in containers:
if container["candelete"]:
try:
database.delete_container(container["container_name"])
except exceptions.CosmosResourceNotFoundError:
logging.info(
f"Container {container['container_name']} does not exist"
)
else:
logging.error("No valid containers_to_delete provided")
return func.HttpResponse(
body='{"result": "Please pass a valid name on the query string or in the request body"}',
mimetype="application/json",
status_code=400,
)
return func.HttpResponse(
body='{"result": "done"}',
mimetype="application/json",
status_code=200,
)
```
#### File: StockTracker/delete_cosmosdb_items/__init__.py
```python
import logging
from datetime import date, timedelta
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey
from shared_code import get_config, cosmosdb_module
def main(payload: str) -> str:
"""Function to output data to CosmosDB"""
logging.info("Outputting data to CosmosDB")
days_to_update = payload
containers = (get_config.get_containers())["containers"]
if days_to_update == "all":
recreate_containers(containers)
else:
drop_selected_dates(containers, days_to_update)
return '{"status": "Done"}'
def recreate_containers(containers):
"""Function to recreate containers"""
logging.info("Recreating containers")
database = cosmosdb_module.cosmosdb_database()
for container in containers:
if container["output_container"]:
try:
database.delete_container(container["container_name"])
except exceptions.CosmosResourceNotFoundError:
logging.info(f"Container {container['container_name']} does not exist")
for container in containers:
if container["output_container"]:
database.create_container_if_not_exists(
id=container["container_name"],
partition_key=PartitionKey(path=container["partition_key"]),
)
def drop_selected_dates(containers, days_to_update):
"""Function to drop selected dates"""
logging.info("Dropping selected dates")
today = date.today()
end_date = today.strftime("%Y-%m-%d")
start_date = (today - timedelta(days=days_to_update)).strftime("%Y-%m-%d")
for container in containers:
if (
container["output_container"]
and container["container_name"] != "single_day"
):
container_client = cosmosdb_module.cosmosdb_container(
container["container_name"]
)
for item in container_client.query_items(
query=f"SELECT * FROM c WHERE c.date >= '{start_date}' and c.date <= '{end_date}'",
enable_cross_partition_query=True,
):
logging.info(item)
container_client.delete_item(item, partition_key=item["id"])
database = cosmosdb_module.cosmosdb_database()
single_day_container_setup = [
d for d in containers if d["container_name"] == "single_day"
]
try:
database.delete_container(single_day_container_setup[0]["container_name"])
except exceptions.CosmosResourceNotFoundError:
logging.info("Container single_day does not exist")
database.create_container_if_not_exists(
id=single_day_container_setup[0]["container_name"],
partition_key=PartitionKey(path=single_day_container_setup[0]["partition_key"]),
)
def create_items(data):
"""Function to create items"""
logging.info("Creating items")
stocks_held = data["stocks_held"]
totals = data["totals"]
invested = data["invested"]
container = cosmosdb_module.cosmosdb_container("stocks_held")
for item in stocks_held:
container.upsert_item(item)
container = cosmosdb_module.cosmosdb_container("totals")
for item in totals:
container.upsert_item(item)
container = cosmosdb_module.cosmosdb_container("invested")
for item in invested:
container.upsert_item(item)
today = date.today().strftime("%Y-%m-%d")
single_day_stocks = [d for d in stocks_held if d["date"] == today]
container = cosmosdb_module.cosmosdb_container("single_day")
for item in single_day_stocks:
container.upsert_item(item)
```
#### File: StockTracker/get_forex_data_orchestrator/__init__.py
```python
import logging
import json
import azure.functions as func
import azure.durable_functions as df
def orchestrator_function(context: df.DurableOrchestrationContext):
"""get data for all currencies from api"""
logging.info("Getting forex data")
transactions = context.get_input()
# initialize variables
currencies = []
query = "FX_DAILY"
forex_data = {}
base_currency = "EUR"
# get unique currencies
for temp_loop in transactions["transactions"]:
currencies.append(temp_loop["currency"])
currencies = list(dict.fromkeys(currencies))
# get data for all currencies
for currency in currencies:
if currency == "GBX":
currency = "GBP"
url = f"https://www.alphavantage.co/query?function={query}&from_symbol={currency}&to_symbol={base_currency}&outputsize=full"
temp_data = yield context.call_activity("call_alphavantage_api", url)
gbx_data = {
"Meta Data": {
"1. Information": "Forex Daily Prices (open, high, low, close)",
"2. From Symbol": "EUR",
"3. To Symbol": "GBX",
"4. Output Size": "Full size",
"5. Last Refreshed": "2022-02-09 19:05:00",
"6. Time Zone": "UTC",
},
"Time Series FX (Daily)": {},
}
for single_date, date_data in temp_data["Time Series FX (Daily)"].items():
gbx_data["Time Series FX (Daily)"].update(
{
single_date: {
"1. open": float(date_data["1. open"]) / 100,
"2. high": float(date_data["2. high"]) / 100,
"3. low": float(date_data["3. low"]) / 100,
"4. close": float(date_data["4. close"]) / 100,
}
}
)
forex_data.update({"GBX": gbx_data})
else:
url = f"https://www.alphavantage.co/query?function={query}&from_symbol={currency}&to_symbol={base_currency}&outputsize=full"
temp_data = yield context.call_activity("call_alphavantage_api", url)
forex_data.update({currency: temp_data})
# return dictionary
return forex_data
main = df.Orchestrator.create(orchestrator_function)
```
#### File: StockTracker/get_linechart_data/__init__.py
```python
import logging
import json
from datetime import date, timedelta
import azure.functions as func
from shared_code import cosmosdb_module
def main(req: func.HttpRequest) -> func.HttpResponse:
""" "HTTP trigger function to get line chart data"""
logging.info("Getting linechart data")
datatype = req.route_params.get("datatype")
datatoget = req.route_params.get("datatoget")
if not datatype or not datatoget:
logging.error("No datatype provided")
return func.HttpResponse(
body='{"status": "Please pass a name on the query string or in the request body"}',
mimetype="application/json",
status_code=400,
)
logging.info(f"Getting data for {datatype}")
container = cosmosdb_module.cosmosdb_container("totals")
if datatoget == "max":
items = list(container.read_all_items())
else:
start_date, end_date = datatogetswitch(datatoget)
items = list(
container.query_items(
query="SELECT * FROM c WHERE c.date >= @start_date AND c.date <= @end_date",
parameters=[
{"name": "@start_date", "value": start_date},
{"name": "@end_date", "value": end_date},
],
enable_cross_partition_query=True,
)
)
result = []
for item in items:
temp_list = inputoptions(datatype, item)
for temp_item in temp_list:
result.append(temp_item)
result = sorted(result, key=lambda k: k["date"])
if not result:
return func.HttpResponse(
body='{"status": Please pass a valid name on the query string or in the request body"}',
mimetype="application/json",
status_code=400,
)
return func.HttpResponse(
body=json.dumps(result), mimetype="application/json", status_code=200
)
def inputoptions(datatype, item):
"""Home made match function"""
if datatype == "invested_and_value":
return [
{
"date": item["date"],
"value": item["total_value"],
"category": "Value",
},
{
"date": item["date"],
"value": item["total_invested"],
"category": "Invested",
},
]
# return an empty list if no match so the caller's loop is a no-op
return []
def datatogetswitch(datatoget):
"""Home made match function"""
end_date = date.today()
if datatoget == "year":
start_date = end_date - timedelta(days=365)
if datatoget == "month":
start_date = end_date - timedelta(days=30)
if datatoget == "week":
start_date = end_date - timedelta(days=7)
if datatoget == "ytd":
start_date = date(end_date.year, 1, 1)
start_date = start_date.strftime("%Y-%m-%d")
end_date = end_date.strftime("%Y-%m-%d")
# return nothing if no match
return start_date, end_date
```
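As a rough illustration, for `datatype` `"invested_and_value"` each totals document is fanned out by `inputoptions` into two chart rows (one per category), so the JSON body returned to the caller looks roughly like this for two days of data (dates and numbers are made up):

```python
# Hypothetical response rows produced by inputoptions() for two days of totals.
[
    {"date": "2022-03-01", "value": 1000.0, "category": "Value"},
    {"date": "2022-03-01", "value": 900.0, "category": "Invested"},
    {"date": "2022-03-02", "value": 1010.0, "category": "Value"},
    {"date": "2022-03-02", "value": 900.0, "category": "Invested"},
]
```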
#### File: StockTracker/output_singleday_to_cosmosdb/__init__.py
```python
import logging
from datetime import date
from shared_code import cosmosdb_module
def main(payload: str) -> str:
"""Function to output data to CosmosDB"""
logging.info("Outputting data to CosmosDB")
data = payload
stocks_held = data["stocks_held"]
totals = data["totals"]
today = date.today().strftime("%Y-%m-%d")
single_day_stocks = [d for d in stocks_held if d["date"] == today]
container = cosmosdb_module.cosmosdb_container("single_day")
for item in single_day_stocks:
container.upsert_item(item)
single_day_totals = [d for d in totals if d["date"] == today]
container = cosmosdb_module.cosmosdb_container("single_day_totals")
for item in single_day_totals:
container.upsert_item(item)
return '{"status": "Done"}'
``` |
{
"source": "jorants/beanie",
"score": 3
} |
#### File: odm/queries/cursor.py
```python
from abc import abstractmethod
from typing import Optional, List, Union, Type, Dict, Any
from pydantic.main import BaseModel
class BaseCursorQuery:
"""
BaseCursorQuery class. Wrapper over AsyncIOMotorCursor,
which parse result with model
"""
@abstractmethod
def get_projection_model(self) -> Optional[Type[BaseModel]]:
...
@property
@abstractmethod
def motor_cursor(self):
...
def __aiter__(self):
return self
async def __anext__(self) -> Union[BaseModel, Dict[str, Any]]:
if getattr(self, "cursor", None) is None:
self.cursor = self.motor_cursor
next_item = await self.cursor.__anext__()
projection = self.get_projection_model()
return (
projection.parse_obj(next_item)
if projection is not None
else next_item
)
async def to_list(
self, length: Optional[int] = None
) -> Union[List[BaseModel], List[Dict[str, Any]]]: # noqa
"""
Get list of documents
:param length: Optional[int] - length of the list
:return: Union[List[BaseModel], List[Dict[str, Any]]]
"""
motor_list: List[Dict[str, Any]] = await self.motor_cursor.to_list(
length
)
projection = self.get_projection_model()
if projection is not None:
return [projection.parse_obj(i) for i in motor_list]
return motor_list
```
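A minimal usage sketch of this wrapper, assuming a concrete query object (such as a find query built from a `Document` subclass) that provides `get_projection_model` and `motor_cursor`; `find_many_query` below is a hypothetical stand-in for such an object:

```python
# Sketch only: `find_many_query` is a hypothetical query object whose class
# mixes in BaseCursorQuery, as defined above.
async def consume(find_many_query):
    # Async iteration parses each raw motor document into the projection model.
    async for doc in find_many_query:
        print(doc)

async def consume_all(find_many_query):
    # Or materialize everything at once (in practice use one style or the other,
    # since each call consumes the underlying motor cursor).
    return await find_many_query.to_list()
```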
#### File: odm/utils/general.py
```python
import asyncio
import importlib
from typing import List, Type, Union, TYPE_CHECKING
from motor.motor_asyncio import AsyncIOMotorDatabase
if TYPE_CHECKING:
from beanie.odm.documents import DocType
def get_model(dot_path: str) -> Type["DocType"]:
"""
Get the model by the path in format bar.foo.Model
:param dot_path: str - dot separated path to the model
:return: Type[DocType] - class of the model
"""
try:
module_name, class_name = dot_path.rsplit(".", 1)
return getattr(importlib.import_module(module_name), class_name)
except ValueError:
raise ValueError(
f"'{dot_path}' doesn't have '.' path, eg. path.to.your.model.class"
)
except AttributeError:
raise AttributeError(
f"module '{module_name}' has no class called '{class_name}'"
)
async def init_beanie(
database: AsyncIOMotorDatabase,
document_models: List[Union[Type["DocType"], str]],
allow_index_dropping: bool = True,
):
"""
Beanie initialization
:param database: AsyncIOMotorDatabase - motor database instance
:param document_models: List[Union[Type[DocType], str]] - model classes
or strings with dot separated paths
:param allow_index_dropping: bool - if index dropping is allowed.
Default True
:return: None
"""
collection_inits = []
for model in document_models:
if isinstance(model, str):
model = get_model(model)
collection_inits.append(
model.init_collection(
database, allow_index_dropping=allow_index_dropping
)
)
await asyncio.gather(*collection_inits)
``` |
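A minimal initialization sketch built on the function above. `UserDoc` and the `models.user` module are hypothetical names used only to show the dotted-path form that `get_model` resolves; the motor connection string is a placeholder:

```python
# Sketch: wire up motor and initialize beanie with a model given as a dotted path.
import asyncio
from motor.motor_asyncio import AsyncIOMotorClient

async def startup():
    client = AsyncIOMotorClient("mongodb://localhost:27017")
    await init_beanie(
        database=client.db_name,                    # AsyncIOMotorDatabase
        document_models=["models.user.UserDoc"],    # resolved via get_model
    )

asyncio.run(startup())
```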
{
"source": "jorants/rss-torrent-relay",
"score": 2
} |
#### File: jorants/rss-torrent-relay/app.py
```python
import sys, os, re, datetime
from flask import Flask, request, abort, send_from_directory, url_for
from werkzeug.contrib.atom import AtomFeed
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
import feedparser
import requests
import hashlib
import fnmatch
from config import Config
import peewee
from playhouse.db_url import connect
import flask_admin
from flask_admin.contrib.peewee import ModelView
app = Flask(__name__)
app.config.from_object(Config)
db = connect(app.config["DATABASE_URI"])
class BaseModel(peewee.Model):
class Meta:
database = db
class Show(BaseModel):
name = peewee.CharField(512)
last_season = peewee.IntegerField()
last_episode = peewee.IntegerField()
class Episode(BaseModel):
title = peewee.CharField(512)
show = peewee.ForeignKeyField(Show, backref="episodes")
season = peewee.IntegerField()
episode = peewee.IntegerField()
torrentlink = peewee.CharField(512)
added = peewee.DateTimeField(default=datetime.datetime.now)
def __repr__(self):
return "<%s %i:%i>" % (self.show, self.season, self.episode)
db_classes = BaseModel.__subclasses__()
db.create_tables(db_classes)
admin = flask_admin.Admin(
app,
name="microblog",
template_mode="bootstrap3",
url="/admin/" + app.config["URL_KEY"],
)
admin.add_view(ModelView(Show))
admin.add_view(ModelView(Episode))
def parse_title(title):
title = title.lower().replace(".", " ")
parts = re.search(app.config['PARSE_RE'], title)
if parts != None:
parsed = parts.groupdict()
parsed["show"] = parsed["show"].strip()
parsed["tags"] = parsed["tags"].strip().split()
try:
parsed["season"] = int(parsed["season"])
parsed["episode"] = int(parsed["episode"])
except ValueError:
return
return parsed
def match_tags(formats, tags):
for f in formats:
if len(fnmatch.filter(tags, f)) > 0:
return True
return False
def update_feed():
org_feed = feedparser.parse(app.config["FEED"])
for entry in reversed(org_feed.entries):
link = entry["link"]
title = entry["title"]
date = entry.published
episodeinfo = parse_title(title)
if episodeinfo == None:
continue
if Show.select().where(
Show.name == episodeinfo["show"]
).count() > 0 and match_tags(app.config["TAGS"], episodeinfo["tags"]):
show = Show.get(Show.name == episodeinfo["show"])
if (show.last_season, show.last_episode) < (
episodeinfo["season"],
episodeinfo["episode"],
):
show.last_season = episodeinfo["season"]
show.last_episode = episodeinfo["episode"]
show.save()
epp = Episode(
title=title,
show=show,
season=episodeinfo["season"],
episode=episodeinfo["episode"],
torrentlink=link,
)
print(epp)
epp.save()
scheduler = BackgroundScheduler()
scheduler.add_job(func=update_feed, trigger="interval", minutes=30)
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
@app.route("/feed/" + app.config["URL_KEY"])
def feed():
update_feed()
toshow = Episode.select().order_by(Episode.added.desc()).limit(20)
feed = AtomFeed(title="My Torrents", feed_url=request.url, url=request.url_root)
for epp in toshow:
feed.add(
epp.title,
"%s S%02iE%02i" % (epp.show.name, epp.season, epp.episode),
link="/" + str(epp.id) + ".torrent",
url="/" + str(epp.id) + ".torrent",
published=epp.added,
updated=epp.added,
id=epp.id,
)
return feed.get_response()
@app.route("/feed/" + app.config["URL_KEY"] + "/<int:idd>.torrent")
def torrent(idd):
try:
epp = Episode.get_by_id(idd)
except Episode.DoesNotExist:
abort(404)
url = epp.torrentlink
fn = hashlib.sha224(url.encode()).hexdigest() + ".torrent"
# ensure the folder exists
if not os.path.exists("torrents"):
os.makedirs("torrents")
path = "torrents/" + fn
# Download if not already done
if not os.path.exists(path):
r = requests.get(url)
with open(path, "wb") as f:
f.write(r.content)
return send_from_directory("torrents", fn)
@app.route("/")
def index():
return "Not here"
if __name__ == "__main__":
update_feed()
app.run()
``` |
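The `Config` object is imported from `config.py`, which is not part of this listing. Based on the attributes read through `app.config[...]` above, it presumably looks roughly like the sketch below; the regex and all values are illustrative guesses, not the author's actual settings:

```python
# Hypothetical config.py sketch inferred from the app.config[...] lookups above.
class Config:
    DATABASE_URI = "sqlite:///shows.db"
    URL_KEY = "some-secret-path-segment"       # obscures the feed/admin URLs
    FEED = "https://example.org/rss"           # upstream torrent RSS feed
    TAGS = ["720p*", "1080p*"]                 # fnmatch patterns used by match_tags
    # Must expose named groups 'show', 'season', 'episode' and 'tags'
    # for parse_title() to work.
    PARSE_RE = r"(?P<show>.+?) s(?P<season>\d+)e(?P<episode>\d+)(?P<tags>.*)"
```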
{
"source": "jorants/voc",
"score": 2
} |
#### File: tests/builtins/test_slice.py
```python
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class SliceTests(TranspileTestCase):
def test_slice_repr_stop(self):
self.assertCodeExecution("""
print(slice(0))
print(slice('foo'))
""")
def test_slice_repr_start_stop(self):
self.assertCodeExecution("""
print(slice(0, 100))
print(slice('foo', Exception))
""")
def test_slice_repr_start_stop_step(self):
self.assertCodeExecution("""
print(slice(0, 100, 2))
print(slice('foo', Exception, object()))
""")
class BuiltinSliceFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["slice"]
not_implemented = [
'test_class',
]
``` |
{
"source": "jorasdf24/workflow-manager",
"score": 3
} |
#### File: jorasdf24/workflow-manager/Workflow-manager.py
```python
import os
import sys
import time
import sqlite3
import urllib.request as req
import urllib.parse as p
def is_valid_url(url):
"""Return True if the URL is valid, and false if not"""
try:
request = req.Request(url)
try:
response = req.urlopen(request)
response = True
except:
response = False
except:
response = False
return response
def is_valid_path(path):
"""Return True if the path is valid and False if not"""
return os.path.exists(path)
def get_paths_based_workflow(cursor,workflow_name):
"""Take the cursor and the workflow name, and return a list of paths that are found in this workflow"""
path_list = []
for name in cursor.execute("SELECT path FROM workflows WHERE workflow_name=?;",(workflow_name,)):
path_list.append(name[0])
return path_list
def get_workflow_list(cursor):
"""Return a list that contains all the name of the workflows exists in the DB"""
workflow_list = []
for name in cursor.execute("SELECT workflow_name FROM workflows;"):
# Each row is a one-element tuple; take the workflow name directly
# instead of stripping characters from the tuple's repr.
workflow_list.append(name[0])
return workflow_list
def open_paths_from_workflow(cursor,workflow_name):
"""Launch all paths from the workflow called workflow_name"""
is_workflow_exist = False
# iterate through each path (parameterized query avoids quoting issues)
for path in cursor.execute("SELECT path FROM workflows WHERE workflow_name = ?;", (workflow_name,)):
try:
# Start the path
os.startfile(path[0])
time.sleep(0.1)
except Exception:
print ("Error opening file: " + str(path[0]))
# There is at least one path
is_workflow_exist = True
if not is_workflow_exist:
print ("This workflow does not exist...")
else:
print ("Enjoy")
def print_menu():
"""Print the Main Menu"""
print ("\n1 - Start workflow")
print ("2 - Create new workflow")
print ("3 - Edit workflow")
print ("4 - Delete workflow")
print ("5 - Print workflows")
print ("6 - Exit")
def print_menu2():
"""Print the Sub Menu to the third option of the Main Menu"""
print ("\n\t1 - Change workflow name")
print ("\t2 - Add path")
print ("\t3 - Delete Path")
print ("\t4 - Exit edit")
def workflow_exists(data_base, workflow_name):
"""Check if a certain workflow exists in the DB"""
result = False
# Need at least one iteration
for path in data_base.execute("SELECT path FROM workflows WHERE workflow_name = ?;", (workflow_name,)):
result = True
return result
def path_exists(data_base, workflow_name, path):
"""Return True if a certain path exist in the DB in a specific workflow, and False if not"""
result = False
# Need at least one iteration
for workflow_name, path in data_base.execute("SELECT workflow_name, path FROM workflows WHERE workflow_name = ? AND path = ?;", (workflow_name, path)):
result = True
return result
def main():
# Connect to the DB, create new one if doesn't exist
connection = sqlite3.connect('workflows.db')
# The cursor used for execute SQL command through the code
data_base = connection.cursor()
# Declare the architecture if the DB is just created
try:
data_base.execute("CREATE TABLE workflows(workflow_name text, path text);")
except Exception:
pass
run = True
while run:
workflow_list_name = get_workflow_list(data_base)
print_menu()
menu_choose = str(input("Enter your choice: "))
if menu_choose in workflow_list_name:
open_paths_from_workflow(data_base,menu_choose)
run = False
# Start workflow
if menu_choose is "1":
workflow_name = str(input("Which workflow do you want to start? "))
is_workflow_exist = False
# iterate through each path
for path in data_base.execute("SELECT path FROM workflows WHERE workflow_name = " + "'" + workflow_name + "';"):
try:
# Start the path
os.startfile(path[0])
time.sleep(0.1)
except Exception:
print ("Error opening file: " + str(path[0]))
# There is at least one path
is_workflow_exist = True
if not is_workflow_exist:
print ("This workflow does not exist...")
else:
print ("Enjoy")
run = False
# New workflow
elif menu_choose is "2":
valid_path = []
workflow_name = str(input("Enter a name for this workflow: "))
# Check if the requested new workflow name is not in use
if workflow_exists(data_base, workflow_name):
print ("There's already a workflow with this name!")
# Make sure the name is not empty
elif workflow_name == '':
print ("Empty name?")
else:
print ("Enter the paths of your desired things to be open. Enter -1 to close and save this workflow")
print ('')
path = ""
counter = 1
while path != "-1":
path = str(input("Enter path number " + str(counter) + ": "))
# Check valid path\URL and that they not exist in the DB
if (is_valid_path(path) or is_valid_url(path)) is False or path_exists(data_base, workflow_name, path):
if path != "-1":
print ("Path either already exists or is invalid!")
valid_path.append(False)
else:
values = (workflow_name, path)
# Insert the values for the new workflow
data_base.execute("INSERT INTO workflows VALUES (?,?);", values)
print ("Path saved")
valid_path.append(True)
counter += 1
# Save changes
connection.commit()
if True in valid_path:
print (workflow_name + " workflow saved successfully!")
else:
print ("Workflow wasn't saved")
# Edit workflow
elif menu_choose is "3":
run2 = True
workflow_name = str(input("Which workflow do you want to edit? "))
if workflow_exists(data_base, workflow_name):
while run2:
print_menu2()
edit_choose = str(input("\tEnter your choice: "))
# Change workflow name
if edit_choose is "1":
new_workflow_name = str(input("\tEnter new workflow name: "))
data_base.execute("UPDATE workflows SET workflow_name = " + "'" + new_workflow_name + "'" + " WHERE workflow_name = " + "'" + workflow_name + "';")
# Save changes
connection.commit()
workflow_name = new_workflow_name
print ("\tName changed!")
# Add path to the workflow
elif edit_choose is "2":
path = str(input("\tEnter the path of your desired thing to be open: "))
if (is_valid_path(path) or is_valid_url(path)) is True and not path_exists(data_base, workflow_name, path):
values = (workflow_name, path)
data_base.execute("INSERT INTO workflows VALUES (?,?);", values)
connection.commit()
print ("\tPath added!")
else:
print ("\tPath either already exists or is invalid!")
# Delete path in the workflow
elif edit_choose is "3":
print("\tEnter path to delete: ")
# Get the lost of paths in the workflows
path_list = get_paths_based_workflow(data_base,workflow_name)
path_number_dict = {}
# Make number based choosing system
for i in range(len(path_list)):
print("\t" + str(i + 1) + " - " + str(path_list[i]))
path_number_dict[str(i + 1)] = path_list[i]
number_input = str(input("\t"))
try:
path = path_number_dict[number_input]
except:
path = ""
if path_exists(data_base, workflow_name, path):
# Delete...
data_base.execute("DELETE FROM workflows WHERE workflow_name = " + "'" + workflow_name + "'" + " and path = " + "'" + path + "';")
connection.commit()
print ("\tPath/URL deleted!")
else:
print ("\tPath doesn't exist!")
# Exit to Main Menu
elif edit_choose is "4":
print ("\tChanges saved!")
run2 = False
else:
print ("This workflow does not exist...")
elif menu_choose is "4":
print ("Which workflow do you want to delete?")
workflow_name = str(input())
# Check if the workflow exists
if workflow_exists(data_base, workflow_name):
data_base.execute("DELETE FROM workflows WHERE workflow_name = ?;", (workflow_name,))
# Save changes to prevent loss
connection.commit()
print ("Workflow deleted successfully!")
else:
print ("This workflow does not exist...")
# Print workflows
elif menu_choose is "5":
workflows_dict = {}
# Save the data to a dict
for name in data_base.execute("SELECT workflow_name, path FROM workflows;"):
workflows_dict[name[0]] = []
for name in data_base.execute("SELECT workflow_name, path FROM workflows;"):
workflows_dict[name[0]].append(name[1])
if bool(workflows_dict):
print ("We found these workflows:")
print ('')
else:
print ("No workflows were created!")
# Print the data
for key, value in workflows_dict.items():
print ("Name: " + key)
for i in range(len(value)):
if i == 0:
print ("Paths: " + value[i])
else:
print (" " + value[i])
print ('')
# Exit the program
elif menu_choose is "6":
print ("See you later!")
run = False
# Save (commit) the changes
connection.commit()
# Close the connection with the DB
connection.close()
if __name__ == "__main__":
main()
``` |
{
"source": "jorasofra/tytus",
"score": 2
} |
#### File: parser/team28/main.py
```python
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from tkinter import messagebox
from tkinter import scrolledtext
from tkinter.font import Font
import os
import json
from utils.analyzers.syntactic import *
from controllers.error_controller import ErrorController
from utils.reports.generate_ast import GraficarAST
from utils.reports.report_error import ReportError
from controllers.ast_construction import *
report_error = None
report_ast = None
class GUI:
archivo = ""
def __init__(self, window):
self.ventana = window
# Defino un titulo para el GUI
self.ventana.title("Query Tool")
# Defino un fondo para usar, pueden cambiarlo por otro color mas bonito
self.ventana.configure(background='#3c3f41')
self.ventana.columnconfigure(0, weight=1)
self.ventana.rowconfigure(0, weight=1)
# Creo un frame para que contenga la intefaz, es como en java se hace con swing
frame = LabelFrame(self.ventana)
# Posiciono el frame
frame.grid(row=0, column=0, columnspan=10, pady=10)
# Defino un fondo para usar, pueden cambiarlo por otro color mas bonito
frame.configure(background='#3c3f41', borderwidth=0)
#############################################_MENU_#############################################
# Creo un menu, es decir una lista desplegable
barraMenu = Menu(self.ventana)
self.ventana.config(menu=barraMenu)
archivoMenu = Menu(barraMenu, tearoff=0)
#############################################SUB MENU EJECUTAR#############################################
# Creo un menu, es decir una lista desplegable
archivoEjecutar = Menu(archivoMenu, tearoff=0)
# Este menu va a ser para ejecutar archivos y ser analizados por el parser
# command es para anadir metodos creados
archivoEjecutar.add_command(
label="Analizar Entrada", command=self.analizar_entrada)
#############################################MENU Abrir#############################################
archivoOpen = Menu(archivoMenu, tearoff=0)
archivoOpen.add_command(label="Abrir Archivo",
command=self.open_file_editor)
#############################################MENU Archivo#############################################
archivoMenu.add_command(label="Nuevo", command=self.nuevo)
archivoMenu.add_separator()
archivoMenu.add_cascade(label="Abrir", menu=archivoOpen)
archivoMenu.add_separator()
archivoMenu.add_command(label="Guardar", command=self.guardar)
archivoMenu.add_command(label="Guardar como...",
command=self.guardar_como)
archivoMenu.add_separator()
archivoMenu.add_cascade(label="Ejecutar", menu=archivoEjecutar)
archivoMenu.add_separator()
archivoMenu.add_command(label="Salir", command=self.terminar)
#############################################MENU WINDOWS##############################################
windows_menu = Menu(barraMenu, tearoff=0)
windows_menu.add_command(label='Report AST',
command=self.report_ast_windows)
windows_menu.add_command(label='Report Errors',
command=self.report_errors_windows)
#############################################MENU LINUX################################################
ubuntu_menu = Menu(barraMenu, tearoff=0)
ubuntu_menu.add_command(label='Report AST',
command=self.report_ast_ubuntu)
ubuntu_menu.add_command(label='Report Errors',
command=self.report_errors_ubuntu)
#############################################MENU REPORTES#############################################
archivoReportes = Menu(barraMenu, tearoff=0)
archivoReportes.add_cascade(label="Windows", menu=windows_menu)
archivoReportes.add_separator()
archivoReportes.add_cascade(label="Linux", menu=ubuntu_menu)
#############################################MENU PRINCIPAL#############################################
barraMenu.add_cascade(label="Archivo",
menu=archivoMenu) # anade submenu
barraMenu.add_cascade(label="Reportes", menu=archivoReportes)
barraMenu.configure(background='SpringGreen')
############################################_ENTRADA_############################################
Label(frame, text='Archivo de Entrada', borderwidth=0,
font='Arial 15 bold', width=52, bg='#3c3f41', foreground='#fff').grid(row=3, column=0)
# Crea un scroll por si el texto es muy largo
self.entrada = scrolledtext.ScrolledText(frame, borderwidth=0, height=35,
width=70, bg='#2e2e31', foreground='#fff')
self.entrada.grid(row=4, column=0, padx=30)
# Para este editor aun hay que ver si lo usamos como consola para errores, si no lo quitamos
Label(frame, text='Consola', borderwidth=0,
font='Arial 15 bold', width=52, bg='#3c3f41', foreground='#fff').grid(row=3, column=1)
self.salida = scrolledtext.ScrolledText(frame, state=DISABLED, borderwidth=0, height=35,
width=70, bg='#1c1c1e', foreground='#9efb01')
self.salida.grid(row=4, column=1, padx=30)
# END
# Metodo para abrir archivo y colocarlo en el editor
def open_file_editor(self):
filename = askopenfilename(title="Abrir Archivo")
archivo = open(filename, "r")
texto = archivo.read()
self.entrada.insert(INSERT, texto)
archivo.close()
messagebox.showinfo("CARGA", "SE CARGO CORRECTAMENTE EL ARCHIVO SQL")
return
# Crea una nueva pestana
def nuevo(self):
self.entrada.delete(1.0, END)
self.salida.delete(1.0, END)
self.archivo = ""
# Guarda el archivo
def guardar(self):
if self.archivo == "":
self.guardar_como()
else:
guardar_info = open(self.archivo, "w")
guardar_info.write(self.entrada.get("1.0", END))
guardar_info.close()
# Opcion para guardar como
def guardar_como(self):
guardar_info = asksaveasfilename(title="Guardar Archivo")
write_file = open(guardar_info, "w+")
write_file.write(self.entrada.get("1.0", END))
write_file.close()
self.archivo = guardar_info
# Opcion para ejecutar el texto de entrada del editor
def analizar_entrada(self):
global report_error
global report_ast
texto = self.entrada.get("1.0", END)
result = parse(texto)
# jsonStr = json.dumps(result, default=lambda o: o.__dict__) #Convierte el AST a formato JSON para poder saber como se esta formando
# print(result) # Imprime el AST
if len(ErrorController().getList()) > 0:
report_error = ReportError()
messagebox.showerror('ERRORES', 'Se encontraron errores')
else:
result2 = parse2(texto)
report_ast = result2
messagebox.showinfo("EXITO", "SE FINALIZO EL ANALISIS CON EXITO")
# ---------- TEST ---------
for inst in result:
# print(inst)
inst.execute()
#report_errors = ReportError()
#report = open('dot.txt', 'w')
# report.write(report_errors.get_report(
# ErrorController().getExeErrList()))
# report.close()
#os.system('dot -Tpdf dot.txt -o error.pdf')
# os.startfile('error.pdf')
# ---------- TEST ---------
# Para mostrar el editor
def report_ast_ubuntu(self):
global report_ast
graficadora = GraficarAST()
report = open('./team28/dot.txt', 'w')
report.write(graficadora.generate_string(report_ast))
report.close()
os.system('dot -Tpdf ./team28/dot.txt -o ./team28/ast.pdf')
# Si estan en ubuntu dejan esta linea si no la comentan y descomentan la otra para windows
os.system('xdg-open ./team28/ast.pdf')
# os.open('ast.pdf')
# os.startfile('ast.pdf')
def report_errors_ubuntu(self):
global report_error
report = open('./team28/dot.txt', 'w')
report.write(report_error.get_report(ErrorController().getList()))
report.close()
os.system('dot -Tpdf ./team28/dot.txt -o ./team28/error.pdf')
os.system('xdg-open ./team28/error.pdf')
def report_errors_windows(self):
global report_error
report = open('dot.txt', 'w')
report.write(report_error.get_report(ErrorController().getList()))
report.close()
os.system('dot -Tpdf dot.txt -o error.pdf')
os.startfile('error.pdf')
def report_ast_windows(self):
global report_ast
graficadora = GraficarAST()
report = open('dot.txt', 'w')
report.write(graficadora.generate_string(report_ast))
report.close()
os.system('dot -Tpdf dot.txt -o ast.pdf')
os.startfile('ast.pdf')
# Para salir de la aplicacion
def terminar(self):
salir = messagebox.askokcancel("Salir", "Está seguro que desea salir?")
if salir:
self.ventana.destroy()
return
if __name__ == '__main__':
root = Tk()
app = GUI(root)
root.mainloop()
```
#### File: instructions/DDL/database_inst.py
```python
from models.instructions.shared import Instruction
from controllers.type_checker import TypeChecker
class CreateDB(Instruction):
def __init__(self, properties, replace, noLine, noColumn):
# if_not_exists:bool, id:str, listpermits: []
self._properties = properties
self._replace = replace
self._noLine = noLine
self._noColumn = noColumn
def __repr__(self):
return str(vars(self))
def execute(self):
typeChecker = TypeChecker()
database = typeChecker.searchDatabase(self._properties['id'])
if database:
if self._properties['if_not_exists']:
return
if self._replace:
typeChecker.deleteDatabase(database.name, self._noLine,
self._noColumn)
# TODO Verificar permisos
typeChecker.createDatabase(self._properties['id'], self._noLine,
self._noColumn)
class DropDB(Instruction):
def __init__(self, if_exists, database_name, noLine, noColumn):
self._if_exists = if_exists
self._database_name = database_name
self._noLine = noLine
self._noColumn = noColumn
def __repr__(self):
return str(vars(self))
def execute(self):
typeChecker = TypeChecker()
database = typeChecker.searchDatabase(self._database_name)
if self._if_exists and not database:
return
typeChecker.deleteDatabase(self._database_name, self._noLine,
self._noColumn)
class ShowDatabase(Instruction):
'''
SHOW DATABASE recibe una ER para mostrar esas bases de datos, caso contrario muestra todas
'''
def __init__(self, patherMatch):
self._patherMatch = patherMatch
def execute(self):
pass
def __repr__(self):
return str(vars(self))
class AlterDatabase(Instruction):
'''
ALTER DATABASE recibe ya sea el nombre o el duenio antiguo y lo sustituye por un nuevo nombre o duenio
si recibe un 1 es porque es la base de datos
si recibe un 2 es porque es el duenio
'''
def __init__(self, alterType, oldValue, newValue):
self._alterType = alterType
self._oldValue = oldValue
self._newValue = newValue
def execute(self):
pass
def __repr__(self):
return str(vars(self))
```
#### File: instructions/Expression/expression.py
```python
from enum import Enum
from abc import abstractmethod
class SymbolsAritmeticos(Enum):
PLUS = 1
MINUS = 2
TIMES = 3
DIVISON = 4
EXPONENT = 5
MODULAR = 6
BITWISE_SHIFT_RIGHT = 7
BITWISE_SHIFT_LEFT = 8
BITWISE_AND = 9
BITWISE_OR = 10
BITWISE_XOR = 11
class SymbolsTipoDato(Enum):
INTEGER = 1
FLOAT = 2
STRING = 3
CHAR = 4
BOOLEANO = 5
class SymbolsRelop(Enum):
EQUALS = 1
NOT_EQUAL = 2
GREATE_EQUAL = 3
GREATE_THAN = 4
LESS_THAN = 5
LESS_EQUAL = 6
NOT_EQUAL_LR = 7
class SymbolsUnaryOrOthers(Enum):
UMINUS = 1
UPLUS = 2
BITWISE_NOT = 3
SQUARE_ROOT = 4
CUBE_ROOT = 5
class Expression:
@abstractmethod
def procces(self):
pass
class BinaryOperation(Expression):
'''
Una operacion binaria recibe, sus dos operandos y el operador
'''
def __init__(self, value1, value2, operador) :
self.value1 = value1
self.value2 = value2
self.operador = operador
def __repr__(self):
return str(vars(self))
def procces(self, expression):
value1 = self.value1.procces(expression)
value2 = self.value2.procces(expression)
operador = self.operador
if value1.type != SymbolsTipoDato.INTEGER and value2.type != SymbolsTipoDato.INTEGER:
print('error de ejecucion')
return
value = 0
if operador == SymbolsAritmeticos.PLUS:
value = round(value1.value + value2.value, 2)
elif operador == SymbolsAritmeticos.MINUS:
value = round(value1.value - value2.value, 2)
elif operador == SymbolsAritmeticos.TIMES:
value = round(value1.value * value2.value, 2)
elif operador == SymbolsAritmeticos.DIVISON:
value = round(value1.value / value2.value, 2)
elif operador == SymbolsAritmeticos.EXPONENT:
value = round(value1.value ** value2.value, 2)
elif operador == SymbolsAritmeticos.MODULAR:
value = round(value1.value % value2.value, 2)
elif operador == SymbolsAritmeticos.BITWISE_SHIFT_LEFT:
value = round(value1.value << value2.value, 2)
elif operador == SymbolsAritmeticos.BITWISE_SHIFT_RIGHT:
value = round(value1.value >> value2.value, 2)
elif operador == SymbolsAritmeticos.BITWISE_AND:
value = round(value1.value & value2.value)
elif operador == SymbolsAritmeticos.BITWISE_OR:
value = round(value1.value | value2.value)
elif operador == SymbolsAritmeticos.BITWISE_XOR:
value = round(value1.value ^ value2.value)
return NumberExpression(SymbolsTipoDato.INTEGER, value)
# TODO <NAME>
class Relop(Expression):
'''
Relop contiene los operadores logicos
== != >= ...
'''
def __init__(self, value1, operador_logico, value2):
self.value1 = value1
self.operador_logico = operador_logico
self.value2 = value2
def __repr__(self):
return str(vars(self))
class ExpressionsTime(Expression):
'''
ExpressionsTime
'''
def __init__(self, name_date, type_date, name_opt):
self.name_date = name_date
self.type_date = type_date
self.name_opt = name_opt
def __repr__(self):
return str(vars(self))
class ExpressionsTrigonometric(Expression):
'''
ExpressionsTrigonometric
'''
def __init__(self, type_trigonometric, expression1, optional_expression2):
self.type_trigonometric = type_trigonometric
self.expression1 = expression1
self.optional_expression2 = optional_expression2
def __repr__(self):
return str(vars(self))
# TODO <NAME>
class ExpressionsGreastLeast(Expression):
'''
ExpressionsGreastLeast
'''
def __init__(self, type_expression, lista_arr):
self.type_expression = type_expression
self.lista_arr = lista_arr
def __repr__(self):
return str(vars(self))
# TODO <NAME>
class MathematicalExpressions(Expression):
'''
MathematicalExpressions
'''
def __init__(self, type_expression, lista_arr, optional_alias):
self.type_expression = type_expression
self.lista_arr = lista_arr
self.optiona_alias = optional_alias
def __repr__(self):
return str(vars(self))
class UnaryOrSquareExpressions(Expression):
'''
UnaryOrSquareExpressions
'''
def __init__(self, sign, expression_list):
self.sign = sign
self.expression_list = expression_list
def __repr__(self):
return str(vars(self))
class LogicalOperators(Expression):
'''
LogicalOperators
'''
def __init__(self, value1, logical_operator, value2):
self.value1 = value1
self.logical_operator = logical_operator
self.value2 = value2
def __repr__(self):
return str(vars(self))
class NumberExpression(Expression):
def __init__(self, type, value):
self.value = value
self.type = type
def procces(self, object):
return self
def __repr__(self):
return str(vars(self))
class StringExpression(Expression):
def __init__(self, type, value):
self.type = type
self.value = value
def procces(self):
return self
def __repr__(self):
return str(vars(self))
```
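A small sketch of how these nodes compose: leaves are `NumberExpression` values and `BinaryOperation.procces` folds them using the operator enum. The extra argument to `procces` is ignored by `NumberExpression`, so `None` is passed here purely as a placeholder:

```python
# Sketch: evaluate 4 + 2 with the classes defined above.
lhs = NumberExpression(SymbolsTipoDato.INTEGER, 4)
rhs = NumberExpression(SymbolsTipoDato.INTEGER, 2)
result = BinaryOperation(lhs, rhs, SymbolsAritmeticos.PLUS).procces(None)
print(result.value)  # 6
```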
#### File: analizer/abstract/expression.py
```python
from abc import abstractmethod
from enum import Enum
import pandas as pd
from analizer.functions import MathFunctions as mf
from analizer.functions import TrigonometricFunctions as trf
# import abstract.select_data as data
# Prueba para dataframe:
# df = data.dataSelect()
# df.crossJoin()
class TYPE(Enum):
NUMBER = 1
STRING = 2
BOOLEAN = 3
class ERROR(Enum):
TYPEERROR = 1
OPERATORERROR = 2
class Expression:
"""
Esta clase representa una expresión
"""
def __init__(self, row, column) -> None:
self.row = row
self.column = column
@abstractmethod
def execute(self, environment):
"""
Metodo que servira para ejecutar las expresiones
"""
class Primitive(Expression):
"""
Esta clase contiene los tipos primitivos
de datos como STRING, NUMBER, BOOLEAN
"""
def __init__(self, type_, value, row, column):
Expression.__init__(self, row, column)
self.type = type_
self.value = value
self.temp = str(value)
def execute(self, environment):
return self
class Identifiers(Expression):
"""
Esta clase representa los nombre de columnas
"""
value = None
# TODO: implementar la funcion para obtener el type de la columna
def __init__(self, table, name, row, column):
Expression.__init__(self, row, column)
self.table = table
self.name = name
self.temp = table + "." + name
self.type = TYPE.NUMBER
def execute(self, environment):
"""
TODO:Se debe hacer la logica para buscar los identificadores en la tabla
"""
# self.value = df.dataTable[self.temp]
return self
class UnaryArithmeticOperation(Expression):
"""
Esta clase recibe un parametro de expresion
para realizar operaciones unarias
"""
def __init__(self, exp, operator, row, column):
Expression.__init__(self, row, column)
self.exp = exp
self.operator = operator
self.temp = str(operator) + exp.temp
def execute(self, environment):
exp = self.exp.execute(environment)
operator = self.operator
if exp.type != TYPE.NUMBER:
return ErrorUnaryOperation(exp.value, self.row, self.column)
if operator == "+":
value = exp.value
elif operator == "-":
value = exp.value * -1
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.NUMBER, value, self.row, self.column)
class BinaryArithmeticOperation(Expression):
"""
Esta clase recibe dos parametros de expresion
para realizar operaciones entre ellas
"""
def __init__(self, exp1, exp2, operator, row, column):
Expression.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.temp = exp1.temp + str(operator) + exp2.temp
def execute(self, environment):
exp1 = self.exp1.execute(environment)
exp2 = self.exp2.execute(environment)
operator = self.operator
if exp1.type != TYPE.NUMBER or exp2.type != TYPE.NUMBER:
return ErrorBinaryOperation(exp1.value, exp2.value, self.row, self.column)
if operator == "+":
value = exp1.value + exp2.value
elif operator == "-":
value = exp1.value - exp2.value
elif operator == "*":
value = exp1.value * exp2.value
elif operator == "/":
value = exp1.value / exp2.value
elif operator == "^":
value = exp1.value ** exp2.value
elif operator == "%":
value = exp1.value % exp2.value
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.NUMBER, value, self.row, self.column)
class BinaryRelationalOperation(Expression):
"""
Esta clase contiene las expresiones binarias de comparacion
que devuelven un booleano.
"""
def __init__(self, exp1, exp2, operator, row, column):
Expression.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.temp = exp1.temp + str(operator) + exp2.temp
def execute(self, environment):
exp1 = self.exp1.execute(environment)
exp2 = self.exp2.execute(environment)
operator = self.operator
try:
if operator == "<":
value = exp1.value < exp2.value
elif operator == ">":
value = exp1.value > exp2.value
elif operator == ">=":
value = exp1.value >= exp2.value
elif operator == "<=":
value = exp1.value <= exp2.value
elif operator == "=":
value = exp1.value == exp2.value
elif operator == "!=":
value = exp1.value != exp2.value
elif operator == "<>":
value = exp1.value != exp2.value
elif operator == "ISDISTINCTFROM":
value = exp1.value != exp2.value
elif operator == "ISNOTDISTINCTFROM":
value = exp1.value == exp2.value
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.BOOLEAN, value, self.row, self.column)
except TypeError:
return ErrorBinaryOperation(exp1.value, exp2.value, self.row, self.column)
except:
print("Error fatal BinaryRelationalOperation")
comps = {
"ISNULL": "IS NULL",
"NOTNULL": "NOT NULL",
"ISTRUE": "IS TRUE",
"ISFALSE": "IS FALSE",
"ISUNKNOWN": "IS UNKNOWN",
"ISNOTNULL": "IS NOT NULL",
"ISNOTTRUE": "IS NOT TRUE",
"ISNOTFALSE": "IS NOT FALSE",
"ISNOTUNKNOWN": "IS NOT UNKNOWN",
"BETWEEN": "BETWEEN",
"NOTBETWEEN": "NOT BETWEEN",
"BETWEENSYMMETRIC": "BETWEEN SYMMETRIC",
}
class UnaryRelationalOperation(Expression):
"""
Esta clase contiene las expresiones unarias de comparacion
que devuelven un booleano.
"""
def __init__(self, exp, operator, row, column):
Expression.__init__(self, row, column)
self.exp = exp
self.operator = operator
self.temp = exp.temp + " " + comps.get(operator)
def execute(self, environment):
exp = self.exp.execute(environment)
operator = self.operator
try:
if operator == "ISNULL":
value = exp.value == None
elif operator == "NOTNULL":
value = exp.value != None
elif operator == "ISTRUE":
value = exp.value == True
elif operator == "ISFALSE":
value = exp.value == False
elif operator == "ISUNKNOWN":
value = exp.value == None
elif operator == "ISNOTNULL":
value = exp.value != None
elif operator == "ISNOTTRUE":
value = exp.value != True
elif operator == "ISNOTFALSE":
value = exp.value != False
elif operator == "ISNOTUNKNOWN":
value = exp.value != None
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.BOOLEAN, value, self.row, self.column)
except TypeError:
return ErrorUnaryOperation(exp.value, self.row, self.column)
except:
print("Error fatal UnaryRelationalOperation")
class TernaryRelationalOperation(Expression):
"""
Esta clase contiene las expresiones ternarias de comparacion
que devuelven un booleano.
"""
def __init__(self, exp1, exp2, exp3, operator, row, column):
Expression.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.exp3 = exp3
self.operator = operator
self.temp = (
exp1.temp
+ " "
+ comps.get(operator)
+ " "
+ self.exp2.temp
+ " AND "
+ self.exp3.temp
)
def execute(self, environment):
exp1 = self.exp1.execute(environment)
exp2 = self.exp2.execute(environment)
exp3 = self.exp3.execute(environment)
operator = self.operator
try:
if operator == "BETWEEN":
value = exp1.value > exp2.value and exp1.value < exp3.value
elif operator == "NOTBETWEEN":
value = not (exp1.value > exp2.value and exp1.value < exp3.value)
elif operator == "BETWEENSYMMETRIC":
t1 = exp1.value > exp2.value and exp1.value < exp3.value
t2 = exp1.value < exp2.value and exp1.value > exp3.value
value = t1 or t2
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.BOOLEAN, value, self.row, self.column)
except TypeError:
return ErrorTernaryOperation(
exp1.value, exp2.value, exp3.value, self.row, self.column
)
except:
print("Error fatal TernaryRelationalOperation")
class BinaryLogicalOperation(Expression):
"""
Esta clase contiene las expresiones booleanas binarias.
"""
def __init__(self, exp1, exp2, operator, row, column):
Expression.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.temp = exp1.temp + " " + str(operator) + " " + exp2.temp
def execute(self, environment):
exp1 = self.exp1.execute(environment)
exp2 = self.exp2.execute(environment)
operator = self.operator
if exp1.type != TYPE.BOOLEAN or exp2.type != TYPE.BOOLEAN:
return ErrorBinaryOperation(exp1.value, exp2.value, self.row, self.column)
if isinstance(exp1.value, pd.core.series.Series) or isinstance(
exp2.value, pd.core.series.Series
):
if operator == "AND":
value = exp1.value & exp2.value
elif operator == "OR":
value = exp1.value | exp2.value
else:
return ErrorOperatorExpression(operator, self.row, self.column)
else:
if operator == "AND":
value = exp1.value and exp2.value
elif operator == "OR":
value = exp1.value or exp2.value
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.BOOLEAN, value, self.row, self.column)
class UnaryLogicalOperation(Expression):
"""
Esta clase contiene las expresiones booleanas unarias.
"""
def __init__(self, exp, operator, row, column):
Expression.__init__(self, row, column)
self.exp = exp
self.operator = operator
self.temp = str(operator) + " " + exp.temp
def execute(self, environment):
exp = self.exp.execute(environment)
operator = self.operator
if exp.type != TYPE.BOOLEAN:
return ErrorUnaryOperation(exp.value, self.row, self.column)
if isinstance(exp.value, pd.core.series.Series):
if operator == "NOT":
value = ~exp.value
else:
return ErrorOperatorExpression(operator, self.row, self.column)
else:
if operator == "NOT":
value = not exp.value
else:
return ErrorOperatorExpression(operator, self.row, self.column)
return Primitive(TYPE.BOOLEAN, value, self.row, self.column)
class ErrorBinaryOperation(Expression):
"""
Reporta error de una expresion
"""
def __init__(self, exp1, exp2, row, column):
Expression.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.error = (
"No se pudo concretar la operacion entre " + str(exp1) + " : " + str(exp2)
)
self.type = ERROR.TYPEERROR
def execute(self, environment):
print(self.error)
class ErrorTernaryOperation(Expression):
"""
Reporta error de una expresion
"""
def __init__(self, exp1, exp2, exp3, row, column):
Expression.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.exp3 = exp3
self.error = (
"No se pudo concretar la operacion entre "
+ str(exp1)
+ " : "
+ str(exp2)
+ " : "
+ str(exp3)
)
self.type = ERROR.TYPEERROR
def execute(self, environment):
print(self.error)
class ErrorUnaryOperation(Expression):
"""
Reporta error de una expresion
"""
def __init__(self, exp, row, column):
Expression.__init__(self, row, column)
self.exp = exp
self.error = "No se pudo concretar la operacion con " + str(exp)
self.type = ERROR.TYPEERROR
def execute(self, environment):
print(self.error)
class ErrorOperatorExpression(Expression):
"""
Reporta error de operador
"""
def __init__(self, operator, row, column):
Expression.__init__(self, row, column)
self.operator = operator
self.error = "No se pudo encontrar el operador: " + operator
self.type = ERROR.OPERATORERROR
def execute(self, environment):
print(self.error)
class FunctionCall(Expression):
"""
Esta clase contiene las llamadas a funciones
"""
def __init__(self, function, params, row, column):
Expression.__init__(self, row, column)
self.function = function
self.params = params
self.temp = str(function) + "("
for t in params:
self.temp += t.temp
self.temp += ")"
def execute(self, environment):
try:
valores = []
for p in self.params:
val = p.execute(environment).value
if isinstance(val, pd.core.series.Series):
val = val.tolist()
valores.append(val)
if self.function == "abs":
value = mf.absolute(*valores)
elif self.function == "cbrt":
value = mf.cbrt(*valores)
elif self.function == "ceil":
value = mf.ceil(*valores)
elif self.function == "ceiling":
value = mf.ceiling(*valores)
elif self.function == "degrees":
value = mf.degrees(*valores)
elif self.function == "div":
value = mf.div(*valores)
elif self.function == "exp":
value = mf.exp(*valores)
elif self.function == "factorial":
value = mf.factorial(*valores)
elif self.function == "floor":
value = mf.floor(*valores)
elif self.function == "gcd":
value = mf.gcd(*valores)
elif self.function == "lcm":
value = mf.lcm(*valores)
elif self.function == "ln":
value = mf.ln(*valores)
elif self.function == "log":
value = mf.log(*valores)
elif self.function == "log10":
value = mf.log10(*valores)
elif self.function == "mod":
value = mf.mod(*valores)
elif self.function == "pi":
value = mf.pi()
elif self.function == "power":
value = mf.pow(*valores)
elif self.function == "radians":
value = mf.radians(*valores)
elif self.function == "round":
value = mf.round(*valores)
elif self.function == "sign":
value = mf.sign(*valores)
elif self.function == "sqrt":
value = mf.sqrt(*valores)
elif self.function == "trunc":
value = mf.truncate_col(*valores)
elif self.function == "width_bucket":
value = mf.with_bucket(*valores)
elif self.function == "random":
value = mf.random_()
elif self.function == "acos":
value = trf.acos(*valores)
elif self.function == "acosd":
value = trf.acosd(*valores)
elif self.function == "asin":
value = trf.asin(*valores)
elif self.function == "asind":
value = trf.asind(*valores)
elif self.function == "atan":
value = trf.atan(*valores)
elif self.function == "atand":
value = trf.atand(*valores)
elif self.function == "atan2":
value = trf.atan2(*valores)
elif self.function == "atan2d":
value = trf.atan2d(*valores)
elif self.function == "cos":
value = trf.cos(*valores)
elif self.function == "cosd":
value = trf.cosd(*valores)
elif self.function == "cot":
value = trf.cot(*valores)
elif self.function == "cotd":
value = trf.cotd(*valores)
elif self.function == "sin":
value = trf.sin(*valores)
elif self.function == "sind":
value = trf.sind(*valores)
elif self.function == "tan":
value = trf.tan(*valores)
elif self.function == "tand":
value = trf.tand(*valores)
elif self.function == "sinh":
value = trf.sinh(*valores)
elif self.function == "cosh":
value = trf.cosh(*valores)
elif self.function == "tanh":
value = trf.tanh(*valores)
elif self.function == "asinh":
value = trf.asinh(*valores)
elif self.function == "acosh":
value = trf.acosh(*valores)
elif self.function == "atanh":
value = trf.atanh(*valores)
else:
value = valores[0]
if isinstance(value, list):
if len(value) <= 1:
value = value[0]
else:
value = pd.Series(value)
return Primitive(TYPE.NUMBER, value, self.row, self.column)
except TypeError:
print("Error de tipos en llamada a funciones")
except:
print("Error desconocido")
``` |
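For illustration, evaluating a small tree built from these classes. The row/column arguments are placeholders and the environment is unused by `Primitive`, so `None` is passed:

```python
# Sketch: (4 + 2) > 5 evaluated through the AST classes above.
four = Primitive(TYPE.NUMBER, 4, 0, 0)
two = Primitive(TYPE.NUMBER, 2, 0, 0)
five = Primitive(TYPE.NUMBER, 5, 0, 0)
total = BinaryArithmeticOperation(four, two, "+", 0, 0)
check = BinaryRelationalOperation(total, five, ">", 0, 0)
print(check.execute(None).value)  # True
```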
{
"source": "joravkumar/duplicity-unattended",
"score": 3
} |
#### File: backup-monitor/backup_monitor/app.py
```python
import os
import re
from datetime import datetime, date, timedelta
import boto3
def _find_last_dates_by_prefix(bucket_name):
"""Find the most recent backup set dates for each key prefix.
:param bucket_name: Name of the bucket to be searched.
:return: A dict of key prefixes (paths) to date objects representing the most recent backup dates.
"""
# Pattern that matches a manifest key and captures the end date.
manifest_pattern = re.compile(r'.*\.(\d{8})T\d{6}Z\.manifest.gpg')
result = dict()
for obj in boto3.resource('s3').Bucket(bucket_name).objects.all():
prefix, basename = os.path.split(obj.key)
match = manifest_pattern.fullmatch(basename)
if match:
# The object appears to be a Duplicity manifest.
end_date = datetime.strptime(match.group(1), '%Y%m%d').date()
if end_date > result.get(prefix, date(1970, 1, 1)):
result[prefix] = end_date
return result
def _send_mail(sender, recipient, subject, body):
"""Send an email.
:param sender: The sender address.
:param recipient: The recipient address.
:param subject: Subject.
:param body: Plain text body.
:return: The sent message ID.
"""
ses = boto3.client('ses')
charset = 'utf-8'
response = ses.send_email(
Destination={
'ToAddresses': [recipient],
},
Message={
'Body': {
'Text': {
'Charset': charset,
'Data': body,
},
},
'Subject': {
'Charset': charset,
'Data': subject,
},
},
Source=sender,
)
return response['MessageId']
def _format_dates_by_prefix(bucket_name, dates_by_prefix):
"""Return a string containing backup date for each prefix separated by newlines.
Example:
2018-01-01: bucket_name/some/prefix
2018-02-01: bucket_name/another/prefix
:param bucket_name: Name of the bucket.
:param dates_by_prefix: Dict of prefixes mapped to dates.
:return: Formatted string.
"""
lines = [f'* {backup_date.isoformat()}: {bucket_name}/{prefix}' for prefix, backup_date in dates_by_prefix.items()]
return '\n'.join(lines) + '\n'
def lambda_handler(event, context):
"""Send email if any backups have grown stale.
Find all backup sets in a bucket. For each unique path (prefix) containing at least one backup set, find the most
recent manifest. If it is older than today - MAX_AGE_DAYS, the backup is considered stale. This function sends
an email notification if there are any stale backups found at the end of this process.
Required env variables:
MAX_AGE_DAYS: Maximum age in days of a backup set before it is considered stale.
BUCKET_NAME: The bucket name to check for backups.
SENDER_ADDR: Sender email address.
RECIPIENT_ADDR: Recipient email address.
:param event: If the event key 'testEmail' exists, an email is sent even if no stale backups are found.
Otherwise, the event is not used.
:param context: Not used.
:return: Message ID if an email was sent. Otherwise, None.
"""
# Check inputs.
bucket_name = os.environ['BUCKET_NAME']
sender = os.environ['SENDER_ADDR']
recipient = os.environ['RECIPIENT_ADDR']
if not (bucket_name and sender and recipient and os.environ['MAX_AGE_DAYS']):
raise ValueError('Missing required env variable.')
max_age_days = int(os.environ['MAX_AGE_DAYS'])
if max_age_days < 1:
raise ValueError('MAX_AGE_DAYS must be positive.')
# Find latest backup dates for all prefixes.
latest_date_by_prefix = _find_last_dates_by_prefix(bucket_name)
if 'testEmail' in event:
subject = f'Backup monitor results: {bucket_name}'
msg = f'Most recent backups in S3 bucket {repr(bucket_name)}:\n' \
+ (_format_dates_by_prefix(bucket_name, latest_date_by_prefix)
if latest_date_by_prefix else 'There are no backups!')
return _send_mail(sender, recipient, subject, msg)
# Find all stale backups.
max_age_delta = timedelta(days=max_age_days)
today = date.today()
stale_date_by_prefix = {prefix: end_date for prefix, end_date in latest_date_by_prefix.items()
if today - end_date > max_age_delta}
if stale_date_by_prefix:
# Missing recent backups for at least one prefix.
subject = f'Missing recent backups: {bucket_name}'
msg = f'The following locations have not been backed up in over {max_age_days} day(s):\n' \
+ _format_dates_by_prefix(bucket_name, latest_date_by_prefix) \
+ '\nPlease check to make sure backups are working properly.'
return _send_mail(sender, recipient, subject, msg)
``` |
{
"source": "Jorbay/Behavioral_Cloning_with_Car_Simulator",
"score": 3
} |
#### File: Jorbay/Behavioral_Cloning_with_Car_Simulator/model.py
```python
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import random
#This series of blocks creates lines, an array of image paths that each direct to a separate image obtained when recording users driving the
#car simulator in a manner I deem acceptable. Three different instances of training were used, and the filepaths of the images were stored in
#three separate csv's.
#The fourth with block that was commented out represents an attempt to force the network to learn how to deal with a particularly tough turn.
#This attempt was later shown to actually hurt the network because I was giving it too many copies of the same pictures.
lines = []
with open('../recorded_data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
with open('../recorded_data_2/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
with open('../data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
#with open('../recorded_data_4/driving_log.csv') as csvfile:
# reader = csv.reader(csvfile)
# for line in reader:
# for i in range (0,10):
# lines.append(line)
#The next few lines are a setup for the generator, which allows a user
#to import picture matrices per batch, rather than all at once, which
#would use up much more memory. The data is shuffled before splitting it into batches
import os
from sklearn.model_selection import train_test_split
import sklearn
#This splits the training (4/5 of the data) and validation (1/5 of the data) sets
train_samples, validation_samples = train_test_split(lines,test_size=.2)
def generator(samples, batch_size = 32):
num_samples = len(samples)
#the dataTesters are only used to ascertain which data sets are being used
dataTester01 = 0
dataTester02 = 0
dataTester03 = 0
dataTester04 = 0
#This loop is run for the duration of the process
while 1:
random.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
#This series of if statements is used to change the datapaths from the csv's into usable datapaths
#for the current system
#the next if statement is done to distinguish the one dataset made on a windows computer
if (len(batch_sample[0].split('\\')) < 2):
if (len(batch_sample[0].split('/')) < 3):
name = '../data/IMG/' + batch_sample[0].split('/')[-1]
if dataTester01==0:
#print('dataTester01')
dataTester01 = dataTester01+1
elif (batch_sample[0].split('/')[-3] == 'recorded_data_4'):
name = '../recorded_data_4/IMG/' + batch_sample[0].split('/')[-1]
if dataTester02==0:
#print('dataTester02')
dataTester02 = dataTester02+1
else:
name = '../recorded_data_2/IMG/' + batch_sample[0].split('/')[-1]
if dataTester03==0:
#print('dataTester03')
dataTester03 = dataTester03+1
else:
name = '../recorded_data/IMG/' + batch_sample[0].split('\\')[-1]
if dataTester04==0:
#print('dataTester04')
dataTester04 = dataTester04+1
#The next few lines obtain the particular image matrix (center_image) and the particular
#steering angle (center_angle) and add them to arrays of image matrices and steering angles
center_image = cv2.imread(name)
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
#The next line and for loop make horizontally flipped copies of every image and
#flip the sign of the relevant steering angles to avoid overfitting for the particular dataset
#which has a majority of left turning images because they were taken on a left turning track
augmented_images, augmented_angles = [],[]
for image,angle in zip(images,angles):
augmented_images.append(image)
augmented_angles.append(angle)
augmented_images.append(cv2.flip(image,1))
augmented_angles.append(angle*(-1.0))
#These last lines return the currently needed batches and shuffle them. It will continue on the next iteration
#of the offset for loop or, if the current epoch is done, on the next iteration of the while 1 loop
X_train = np.array(augmented_images)
y_train = np.array(augmented_angles)
yield sklearn.utils.shuffle(X_train, y_train)
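# Illustrative note (added for clarity, not part of the original script): because every
# image is also appended in horizontally flipped form above, each yielded batch holds
# 2 * len(batch_samples) samples, e.g. up to 64 images for the default batch_size of 32.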
#instantiates a generator for the training and validation data sets
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
import keras
#print(X_train.shape)
#print(y_train.shape)
#gosh, Keras is lovely. It is so easy to use
#We will now build the network architecture, which is heavily based
#Nvidia's architecture from their End to End Learning for Self-Driving
#Cars paper (https://arxiv.org/abs/1604.07316)
#The only changes that seemed to improve the network were a cropping
#layer, which removed the sky from the pictures, relu activation after
#every convolutional or dense layer (except the last), and two more
#convolutional layers
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
#For some reason, the model could not crop until after the lambda layer
model.add(Cropping2D(cropping=((30,20),(1,1))))
#let's add convolution layers
model.add(Convolution2D(24,5,5,border_mode='valid', subsample = (2,2), activation = 'relu'))
model.add(Convolution2D(36,5,5,border_mode='valid', subsample = (2,2),activation='relu'))
#Pooling was found to hurt the model's performance
#model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Convolution2D(48,5,5,border_mode='valid', subsample = (2,2),activation='relu'))
#Dropout layers were also found to hurt the model's performance
#model.add(Dropout(.25))
model.add(Convolution2D(64,3,3,border_mode='valid',activation='relu'))
model.add(Convolution2D(64,3,3,border_mode='valid',activation='relu'))
#model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Convolution2D(64,3,3,border_mode='valid',activation='relu'))
model.add(Convolution2D(64,3,3,border_mode='valid',activation='relu'))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(Dropout(.25))
#This is now just a series of dense (fully-connected) layers. The
#last layer is not followed by relu activation because the steering
#output must remain continuous and able to take negative values
model.add(Flatten())
model.add(Dense(1164,activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation = 'relu'))
model.add(Dense(1))
#The adam optimizer, as recommended by Udacity, performed well.
#I tried adjusting its epsilon value so as to escape local minima
#with more ease, but it only hurt performance
adamOpt = keras.optimizers.Adam()
#The rest of the code compiles the model and trains it for 3 epochs.
#Only three epochs were used to avoid overfitting on the training data.
model.compile(loss='mse', optimizer = adamOpt)
model.fit_generator(train_generator, samples_per_epoch = len(train_samples), validation_data=validation_generator, nb_val_samples = len(validation_samples), nb_epoch=3)
model.save('model.h5')
print("done building model.h5")
``` |
{
"source": "jorbecalona/sec_mirror",
"score": 2
} |
#### File: jorbecalona/sec_mirror/preprocess_10k.py
```python
import re
import os
import pandas as pd
import itertools
from bs4 import BeautifulSoup
from bs4.element import NavigableString as bs4_navstring
from scrapy.selector import Selector
import string
import unicodedata
from difflib import SequenceMatcher as SM
from apply_parallel import apply_parallel as ap
import html
from dask.diagnostics import ProgressBar
def get_num_to_alpha_ratio(textStr):
num_numeric = sum(c.isdigit() for c in textStr)
num_alpha = sum(c.isalpha() for c in textStr)
num_alpha_ratio = num_numeric / (num_numeric + num_alpha)
return num_alpha_ratio
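# Example (illustrative, not part of the original module):
assert get_num_to_alpha_ratio('abc123') == 0.5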
#cleaning/filtration functions (return modified input string):
def strip_markup_tags(text, rep = '', detect_breaks = True):
if detect_breaks:
dummy = '==zzzzz=='
break_strs = ['<page>', '<p>', '<br>', '</div>']
for break_str in break_strs:
text = re.sub(break_str, dummy, text, flags=re.IGNORECASE)
text = re.sub('<[^<]+?>', rep, text)
text = re.sub(dummy, ' ', text)
return text
else:
return re.sub('<[^<]+?>', rep, text)
def strip_markup_tags2(text, rep='', ignore_item_tags=True):
if ignore_item_tags:
tags = list(set(re.findall('<[^<]+?>', text)))
for tag in tags:
if 'div' not in tag:
text = text.replace(tag, '')
return text
else:
return re.sub('<[^<]+?>', rep, text)
def decode_html_chars(text):
sub_dict = {
'(&nbsp;|&#160;|&#09;|&#32;|&ensp;)': ' ', #spaces
'(&quot;|&#34;|&lsquo;|&rsquo;|&ldquo;|&rdquo;|&#8216;|&#8217;|&#8220;|&#8221;|&#x2019;|&#822[01];)': '"', #quotes
'(&amp;|&#38;)':'&', #ampersand
'(&#39;|&apos;)': "'", #apostrophe
'(&lt;|&#60;)': ' LT ', #less than sign
'(&gt;|&#62;)': ' GT ', #greater than sign
'(&#821[12];)' : '-', #dashes
'(&#8226;|&#9702;)': '', #bullets
'(&#185;)' : '', #references
}
for reg_str, sub_str in sub_dict.items():
text = re.sub(reg_str, sub_str, text, flags = re.IGNORECASE)
return text
def get_tag_iterator(text_str, tag, include_tags=False, return_match_objs=False):
if include_tags:
regStr = '(<{tag}>.+?</{tag}>)'.format(tag=tag)
else:
regStr = '(?<=<{tag}>)(.+?)(?=</{tag}>)'.format(tag=tag)
if return_match_objs:
return re.finditer(regStr, text_str, re.I | re.DOTALL)
else:
return map(lambda x: x.group(0), re.finditer(regStr, text_str, re.I | re.DOTALL))
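# Illustrative example (not part of the original module): matching is case-insensitive, so
# list(get_tag_iterator('<TABLE>x</TABLE>', 'table')) yields ['x'], while passing
# include_tags=True yields ['<TABLE>x</TABLE>'] instead.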
def filter_html_tables(text_str, max_num_alpha_ratio=0.15):
table_iter = get_tag_iterator(text_str, 'table', return_match_objs= True)
for table_match_obj in table_iter:
table_str = table_match_obj.group(0)
table_start_ind = table_match_obj.start(0)
table_end_ind = table_match_obj.end(0)
item78_check = re.search('item([\s.]{0,4}|<[^<]+?>)[78]', table_str, re.IGNORECASE) is not None
ratio_check = (get_num_to_alpha_ratio(strip_markup_tags(table_str, detect_breaks = False)) <= max_num_alpha_ratio)
if item78_check or ratio_check:
#continue #aka do not remove table from string
before = text_str[0:table_start_ind]
after = text_str[table_end_ind::]
out_str = before+' '+strip_markup_tags(table_str)+' '+after #replace all markup within tables that are kept with space
text_str = out_str
else:
before = text_str[0:table_start_ind]
after = text_str[table_end_ind::]
out_str = before+' '+after
text_str = out_str
return text_str
def clean_anomalies(text_str):
text_str = re.sub('(-[\n\r\t\v])', '-', text_str) #replace dashes followed by a newline, carriage return, or tab with a dash
text_str = re.sub('(&[\S]{2,6};)', '', text_str) #remove all remaining html chars starting with &
text_str = re.sub('(#[a-f0-9]{6})', '', text_str) #remove all remaining html chars starting with #
#remove other anomalies:
text_str = re.sub('(\s-\s)', ' ', text_str)
text_str = re.sub('and/or', 'and or', text_str, flags=re.IGNORECASE)
text_str = re.sub('(([-.=]){2,}\s*)', ' ', text_str)
text_str = re.sub('_', '', text_str)
text_str = re.sub('(\s){3,}', ' ', text_str)
text_str = re.sub('(\n\s*?){3,}', '\n\n', text_str)
text_str = re.sub('(?<![\n])(\n)(?![\n\s])', ' ', text_str)
return text_str
def clean_sec_html_str(text_str, max_num_alpha_ratio=0.15):
raw_doc_text = text_str
try:
if raw_doc_text:
raw_doc_text = filter_html_tables(raw_doc_text, max_num_alpha_ratio= max_num_alpha_ratio) #filter tables
raw_doc_text = strip_markup_tags(raw_doc_text) #remove remaining markup
raw_doc_text = html.unescape(raw_doc_text) #unescape html chars
#raw_doc_text = decode_html_chars(raw_doc_text) #decode html chars
raw_doc_text = clean_anomalies(raw_doc_text) #clean anomalies
return raw_doc_text
else:
return False
except:
print('problem cleaning string')
return False
def read_html(html_path, n_bytes = None):
fhandle = open(html_path, 'r')
if n_bytes == None:
fileContents = fhandle.read()
else:
fileContents = fhandle.read(n_bytes)
return fileContents
def get_all_txt_filepaths(mypath):
filepaths = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith('.txt'):
filepaths.append(os.path.join(root, file))
return filepaths
def get_all_html_filepaths(mypath):
filepaths = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith('.htm'):
filepaths.append(os.path.join(root, file))
return filepaths
def write_to_file(html_str, fpath):
with open(fpath, "w") as fhandle:
fhandle.write(html_str)
return
def preprocess_10k(html_str):
html_str = clean_sec_html_str(html_str)
html_str = html_str.replace('/n', ' ')
return html_str
item_labels = ['item_1', 'item_1a', 'item_1b', 'item_2', 'item_3', 'item_4', 'item_5', 'item_6',
'item_7', 'item_7a', 'item_8', 'item_9', 'item_9a', 'item_9b', 'item_10', 'item_11', 'item_12',
'item_13', 'item_14', 'item_15']
item_titles = ['business', 'risk_factors', 'unresolved_staff_comments', 'properties', 'legal_proceedings',
'mine_safety_disclosures',
'market_for_registrants_common_equity_related_stockholder_matters_and_issuer_purchases_of_equity_securities',
'selected_financial_data',
'managements_discussion_and_analysis_of_financial_condition_and_results_of_operations',
'quantitative_and_qualitative_disclosures_about_market_risk',
'financial_statements_and_supplementary_data',
'changes_in_and_disagreements_with_accountants_on_accounting_and_financial_disclosure',
'controls_and_procedures', 'other_information',
'directors_executive_officers_and_corporate_governance',
'executive_compensation',
'security_ownership_of_certain_beneficial_owners_and_management_and_related_stockholder_matters',
'certain_relationships_and_related_transactions_and_director_independence',
'principal_accountant_fees_and_services', 'exhibits_financial_statement_schedules']
class ParseError(Exception):
"""Raise for html Parsing Error"""
pass
def open_file(fp):
with open(fp, 'r') as fhandle:
contents = fhandle.read()
return unicodedata.normalize('NFKD', contents).encode('ascii', 'ignore')
def tag_checker(cur_tag, end_tag):
try:
if type(cur_tag) == bs4_navstring:
return True
if cur_tag.has_attr('name'):
return cur_tag.attrs.get('name') != end_tag.attrs.get('name')
else:
return True
except:
return False
###########################
def normalize_elt(elt, alphanum=True):
"""
Normalize string by removing newlines, punctuation, spaces,
and optionally filtering for alphanumeric chars
Args:
elt (string):
string to normalize
alphanum (bool, optional, default True):
if True, only return elt if it contains at least
one alphanumeric char, return None otherwise
Returns:
norm_elt (string):
normalized string or None
"""
norm_elt = elt.replace('\n', '') # remove new lines
translator = str.maketrans('', '', string.punctuation)
norm_elt = norm_elt.lower().translate(translator) # lowercase then remove punctuation
norm_elt = norm_elt.strip().replace(' ', '_') # replace spaces with underscores
if alphanum:
alphanum_check = re.search('[a-zA-Z0-9]', norm_elt)
if alphanum_check:
return norm_elt
else:
return None
else:
return norm_elt
def normalize_elts(elts, alphanum=True):
"""
Normalize list of strings by calling
Args:
elts (list):
list of strings to normalize
alphanum (bool, optional, default True):
if True, only return elts that contains at least one alphanumeric char, return None otherwise
Returns:
(list): returns all elements that are not None
"""
row_elts = list(map(lambda x: normalize_elt(x, alphanum=alphanum), elts))
return [x for x in row_elts if x] #get all elements that are not None
def get_unique_elts(seq, keep_left_most=True):
"""
Get unique elements of list (seq) whilst preserving order
Args:
seq (iterable):
iterable of hashable objects
keep_left_most (bool, optional, default True):
if True, keep the left-most (aka the first occurring) element when there are repeats, otherwise keep right-most
Returns:
(list): list from seq with repeats removed
"""
seen = set()
seen_add = seen.add
if keep_left_most:
return [x for x in seq if not (x in seen or seen_add(x))]
else:
return list(reversed([x for x in reversed(seq) if not (x in seen or seen_add(x))]))
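# Example usage (illustrative, not part of the original module):
assert get_unique_elts(['a', 'b', 'a', 'c']) == ['a', 'b', 'c']
assert get_unique_elts(['a', 'b', 'a', 'c'], keep_left_most=False) == ['b', 'a', 'c']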
def get_parsed_items(html_str, fuzzy_threshold=0.8, get_marked_html=False):
# 1. find table of contents rows in html string
sel = Selector(text=html_str, type='html')
table_row_path = '//table//tr[(td//text()[re:match(.,"item","i")]) and (td//a[contains(@href,"#")])]'
toc_rows = sel.xpath(table_row_path)
if not toc_rows:
print('no links found')
return False
# 2. find text of rows and the first occuring link in each row (there should only be one unique link per row)
toc_rows_text = [get_unique_elts(x.xpath('.//text()[re:match(.,"[a-zA-Z_]")]').extract()) for x in toc_rows]
toc_rows_text = list(map(normalize_elts, toc_rows_text))
toc_rows_links = [get_unique_elts(x.xpath('.//a/@href').extract())[0] for x in toc_rows] # guaranteeing one link per row with [0]
# 3. determine each row's item label and title
toc_rows2 = []
for row_elts, row_link in reversed(list(zip(toc_rows_text, toc_rows_links))): # start from item 15 and go to item 1
row_dict = {'label': None, 'title': None, 'link': None, 'next_link': None}
key_match = list(set(row_elts) & set(item_labels))
val_match = list(set(row_elts) & set(item_titles))
if key_match: # first try to get exact matches on item labels
row_dict['label'] = key_match[0]
row_dict['title'] = item_titles[item_labels.index(key_match[0])]
elif val_match: # then try to get exact matches on item titles
row_dict['label'] = item_labels[item_titles.index(val_match[0])]
row_dict['title'] = val_match[0]
elif fuzzy_threshold < 1:
# if no exact matches can be found and
# fuzzy threshold is less than 1:
# perform fuzzy matching on item titles:
poss_matches = list(itertools.product(row_elts, item_titles))
sims = [SM(None, elt, title).ratio() for elt, title in poss_matches]
max_sim = max(sims)
if max_sim >= fuzzy_threshold: # fuzzy matching measurement
item_title = poss_matches[sims.index(max_sim)][1]
row_dict['label'] = item_labels[item_titles.index(item_title)]
row_dict['title'] = item_title
if row_dict['label'] and row_dict['title']: # if found, assign links and append
row_dict['link'] = row_link
if toc_rows2:
row_dict['next_link'] = toc_rows2[-1]['link']
else:
row_dict['next_link'] = None
toc_rows2.append(row_dict)
toc_rows2 = list(reversed(toc_rows2)) # change back to ascending order (item 1 first)
# 4. check if all items are present
toc_rows2_labels = [x['label'] for x in toc_rows2]
if set(toc_rows2_labels) != set(item_labels):
print('not all items found')
print('the following items are missing: ', str(list(set(item_labels) - set(toc_rows2_labels))))
return False
# 5. find html tags for each item:
soup = BeautifulSoup(html_str, 'lxml')
tag = None
for row_dict in reversed(toc_rows2):
row_dict.update({'next_tag': tag})
tag = soup.find('a', attrs={'name': row_dict['link'].replace('#', '')})
row_dict.update({'tag': tag})
# 6. update soup with new sections and extract html for each item:
for row_dict in toc_rows2:
next_elts = list(row_dict['tag'].next_elements)
els = [x for x in itertools.takewhile(lambda y: tag_checker(y, row_dict['next_tag']), next_elts)]
section = soup.new_tag('div')
section.attrs = {'class': 'marked_item', 'id': row_dict['label']}
row_dict['tag'].wrap(section)
for tag in els:
section.append(tag)
extracted_html = soup.find('div', attrs=section.attrs)
row_dict.update({'html': str(extracted_html)})
if get_marked_html:
new_html_str = str(soup)
return new_html_str
else:
return toc_rows2
def parse_clean_write(filename, html_file_dir_path, html_parsed_dir_path):
print('processing ', filename)
parsed = get_parsed_items(read_html(html_file_dir_path+filename))
if parsed:
combos = [(item['html'], item['label']) for item in parsed]
dirname = html_parsed_dir_path + filename.replace('.htm', '')
if not os.path.exists(dirname):
os.makedirs(dirname)
for item_html, label in combos:
item_html = preprocess_10k(str(item_html))
new_fname = filename.replace('.htm', '_'+label+'.htm')
write_to_file(item_html, dirname+'/'+new_fname)
print('successfully parsed the following file: ', filename)
print()
return True
else:
print('the following file is not parsable deterministically: ', filename)
print()
return False
def parallel_pcw(dfrow):
filename = dfrow['localPath_html'].split('/')[-1]
try:
return parse_clean_write(filename)
except:
print('an unknown error occurred when parsing the following file: ', filename)
print()
return False
def main():
spec = pd.read_csv('data_repo/10k_sample/spec.csv')
spec['localPath_html'] = spec['localPath_html'].str.replace('data_lab/10k_sample3/data_html/',
'data_repo/10k_sample/data_html/')
spec['parse_result'] = ap(spec, parallel_pcw)
spec.to_csv('data_repo/10k_sample/spec2.csv', index = False)
print('DONE')
def get_and_write_html(original_file_path, new_file_path):
contents = read_html(original_file_path)
html = re.search(r'<html>.+?</html>', contents, re.DOTALL | re.I)
dirname = '/'.join(new_file_path.split('/')[0:-1])
if not os.path.exists(dirname):
os.makedirs(dirname)
if html:
html = html.group()
write_to_file(html, new_file_path)
print('HTML found for ', original_file_path)
print('HTML written to ', new_file_path)
print()
return True
else:
print('HTML not found for ', original_file_path)
print()
return False
def parallel_pcw2(dfrow):
original_file_path = dfrow['original_file_path']
new_file_path = dfrow['new_file_path']
new_file_name = dfrow['new_file_name']
html_file_dir_path = dfrow['html_file_dir_path']
html_parsed_dir_path = dfrow['html_parsed_dir_path']
try:
find_html_check = get_and_write_html(original_file_path, new_file_path)
if not find_html_check:
return False
parse_html_check = parse_clean_write(new_file_name, html_file_dir_path, html_parsed_dir_path)
if not parse_html_check:
return False
except:
print('an unknown error occurred while processing the following file: ', original_file_path.split('/')[-1])
print()
def main2():
original_file_dir_path = 'data_repo/wu_sec_filing_data/raw_text_10k/'
html_file_dir_path = 'data_repo/wu_sec_filing_data/html_text_10k/'
html_parsed_dir_path = 'data_repo/wu_sec_filing_data/html_text_10k_parsed/'
file_paths = get_all_txt_filepaths(original_file_dir_path)
new_file_names = [x.split('/')[-1].replace('.txt', '.htm') for x in file_paths]
new_file_paths = [html_file_dir_path + x for x in new_file_names]
combos = list(zip(file_paths, new_file_paths, new_file_names,
[html_file_dir_path]*len(file_paths), [html_parsed_dir_path]*len(file_paths)))
combos = pd.DataFrame.from_records(combos, columns = ['original_file_path', 'new_file_path', 'new_file_name',
'html_file_dir_path', 'html_parsed_dir_path'])
with ProgressBar():
combos['parse_result'] = ap(combos, parallel_pcw2)
combos.to_csv('data_repo/wu_sec_filing_data/parse_results.csv', index=False)
main2()
``` |
{
"source": "Jorch72/ForbiddenMagic2",
"score": 3
} |
#### File: ForbiddenMagic2/stupidJson/itemMaker.py
```python
def main():
print("Please enter filename:")
filename = input()
f = open('..\\src\\main\\resources\\assets\\forbidden\\models\\item\\' + filename + '.json', mode='xt')
f.write('{\n')
f.write(' "parent": "forbidden:item/base_item",\n')
f.write(' "textures": {\n')
f.write(' "layer0": "forbidden:items/' + filename + '"\n')
f.write(' }\n')
f.write('}')
f.close()
main()
``` |
{
"source": "Jorch72/PythonBitTorrent",
"score": 2
} |
#### File: Jorch72/PythonBitTorrent/main.py
```python
import click
import os
import sys
from scbt.logging import log
from scbt.daemon import daemon as _daemon
from scbt.client import send_action
from scbt.common import chunks
@click.group(invoke_without_command=True)
@click.option("--config", default=None, type=click.Path(exists=True))
@click.pass_context
def cli(ctx, config):
from scbt.config import load_config, load_default_config
if config:
if not load_config(config):
sys.stderr.write("Unable to load config '{}'\n".format(config))
sys.exit(1)
else:
load_default_config()
if ctx.invoked_subcommand is None:
status([])
@cli.command(help="Runs the scbt daemon")
@click.option("--fork", default=False, is_flag=True)
def daemon(fork):
if fork:
log.info("Forking to background")
if os.fork() != 0:
sys.exit()
log.info("Running as daemon")
_daemon()
@cli.command(help="Add a new torrent")
@click.argument('f', type=click.Path(exists=True))
@click.option("--paused", default=False, is_flag=True)
def add(f, paused):
if not os.path.isabs(f):
f = os.path.join(os.getcwd(), f)
payload = {
"paused": paused,
"path": f
}
output = send_action("add_torrent", payload)
if output["success"]:
sys.stdout.write("Added {}\n".format(output["info_hash"]))
else:
sys.stderr.write("Error: {}\n".format(output["error"]))
def meta_status(status, torrents):
print("scbt is running on pid {} (running for {} seconds)"\
.format(status["session"]["pid"], status["session"]["uptime"]))
print(":: [{} downloading] [{} seeding] [{} idle]"\
.format(status["downloading"], status["seeding"], status["idle"]))
print(":: [{} kb/s up] [{} kb/s down] [{} peers] [{:.2f} ratio]"\
.format(status["session"]["upload_rate"] / 1000,
status["session"]["download_rate"] / 1000,
status["session"]["num_peers"],
status["session"]["ratio"]))
for torrent in torrents["torrents"]:
print()
print("{}".format(torrent["name"]))
state = torrent["state"]
state = state[0:1].upper() + state[1:]
if state == "Downloading":
print(":: {} since {} ({:.0f}%)".format(state, "TODO", torrent["progress"] * 100))
print(":: Info hash: {}".format(torrent["info_hash"]))
total = len(torrent["pieces"])
sys.stdout.write(":: Progress:\n:: [")
for pieces in chunks(torrent["pieces"], int(total / 49)):
if all(pieces):
sys.stdout.write(":")
elif any(pieces):
sys.stdout.write(".")
else:
sys.stdout.write(" ")
sys.stdout.write("]\n")
def torrent_status(torrents):
for torrent in torrents:
if torrent != torrents[0]:
print()
print("{}".format(torrent["name"]))
print()
state = torrent["state"]
state = state[0:1].upper() + state[1:]
if state == "Downloading":
print("{} since {} ({:.0f}%)".format(state, "TODO", torrent["progress"] * 100))
print("Info hash: {}".format(torrent["info_hash"]))
total = len(torrent["pieces"])
sys.stdout.write("Progress:\n[")
for pieces in chunks(torrent["pieces"], int(total / 49)):
if all(pieces):
sys.stdout.write(":")
elif any(pieces):
sys.stdout.write(".")
else:
sys.stdout.write(" ")
sys.stdout.write("]\n")
@cli.command(help="Show status information")
@click.argument('what', nargs=-1)
@click.option('--show-all', default=False, is_flag=True)
def status(what, show_all):
cwd = os.getcwd()
status = send_action("status")
torrents = send_action("list_torrents")
matching = [t for t in torrents["torrents"] \
if t["info_hash"] in list(what) \
or t["path"] in [os.path.realpath(p) for p in what] \
or (t["path"] == cwd and len(what) == 0)]
if any(matching) and not show_all:
if len(what) == 0:
print("Only showing torrents being downloaded to this directory")
print("Override this behavior with --show-all")
print()
if len(matching) > 1:
meta_status(status, torrents)
else:
torrent_status(matching)
else:
meta_status(status, torrents)
@cli.command(help="Run interactive console on daemon")
def interact():
send_action("interact")
if __name__ == '__main__':
cli()
```
#### File: PythonBitTorrent/scbt/actions.py
```python
import os
from datetime import datetime
from functools import wraps
actions = dict()
def action(f):
@wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
actions[f.__name__] = f
return wrapper
@action
def add_torrent(session, payload):
path = payload.get("path")
if not path:
return { "success": False, "error": "'path' is required" }
if not os.path.exists(path):
return { "success": False, "error": "File not found" }
t = session.add_torrent(path)
return { "success": True, "info_hash": t.info_hash }
@action
def status(session, payload):
status = session.status()
tstatus = [v.status() for k, v in session.torrents.items()]
response = {
"downloading": len([s for s in tstatus if str(s.state) == "downloading"]),
"seeding": len([s for s in tstatus if str(s.state) == "seeding"]),
"idle": len([s for s in tstatus if str(s.state) == "idle"]),
"session": {
"total_download": status.total_download,
"total_upload": status.total_upload,
"ratio": status.total_upload / status.total_download \
if status.total_download != 0 else 0,
"num_peers": status.num_peers,
"download_rate": status.download_rate,
"upload_rate": status.upload_rate,
"uptime": (datetime.now() - session.started).seconds,
"pid": os.getpid()
}
}
return response
@action
def list_torrents(session, payload):
return {
"torrents": [v.json() for k, v in session.torrents.items()]
}
# TODO: Remove this before releasing scbt
@action
def interact(session, payload):
import code
code.interact(local=locals())
```
#### File: PythonBitTorrent/scbt/common.py
```python
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield l[i:i+n]
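# Example usage (illustrative, not part of the original module):
assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]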
```
#### File: PythonBitTorrent/scbt/session.py
```python
import libtorrent as lt
import binascii
import os
from datetime import datetime
from scbt.config import _cfg
from scbt.logging import log
class Torrent():
def __init__(self, info, torrent):
self.torrent = torrent
self.info_hash = binascii.b2a_hex(info.info_hash().to_bytes()).decode("utf-8")
self.name = torrent.name()
self.info = torrent.torrent_file()
self.save_path = torrent.save_path()
def pause(self):
self.torrent.pause()
def resume(self):
self.torrent.resume()
def status(self):
return self.torrent.status()
def json(self):
s = self.status()
return {
"name": self.name,
"info_hash": self.info_hash,
"progress": s.progress,
"pieces": s.pieces,
#"added_time": s.added_time,
#"finished_time": s.finished_time,
"state": str(s.state),
"upload_rate": s.upload_rate,
"download_rate": s.download_rate,
"active_connections": s.num_connections,
"num_peers": s.num_peers,
"priority": s.priority,
"current_tracker": s.current_tracker,
"trackers": self.torrent.trackers(),
"path": os.path.realpath(self.torrent.save_path())
}
class Session():
def __init__(self):
port = int(_cfg("listen", "bt"))
self.session = lt.session()
self.session.listen_on(port, port + 10)
# TODO: Configure more session settings from config
self.session.add_extension(lt.create_ut_pex_plugin)
self.session.add_extension(lt.create_ut_metadata_plugin)
self.session.add_extension(lt.create_metadata_plugin)
self.session.set_severity_level(lt.alert.severity_levels.info)
self.started = datetime.now()
self.torrents = dict()
def status(self):
return self.session.status()
def add_torrent(self, path):
e = lt.bdecode(open(path, 'rb').read())
info = lt.torrent_info(e)
params = {
"save_path": _cfg("torrents", "destination"),
"storage_mode": lt.storage_mode_t.storage_mode_sparse,
"ti": info
}
torrent = Torrent(info, self.session.add_torrent(params))
self.torrents[torrent.info_hash] = torrent
log.info("Added torrent {} - {}".format(torrent.info_hash, torrent.name))
return torrent
session = None
``` |
{
"source": "Jorch72/SchoolsofMagic",
"score": 3
} |
#### File: SchoolsofMagic/stupidJson/itemMaker.py
```python
def main():
print("Please enter filename:")
filename = input()
f = open("../src/main/resources/assets/schoolsmagic/models/item/" + filename + '.json', mode='xt')
f.write('{\n')
f.write(' "parent": "schoolsmagic:item/base_item",\n')
f.write(' "textures": {\n')
f.write(' "layer0": "schoolsmagic:items/' + filename + '"\n')
f.write(' }\n')
f.write('}')
f.close()
main()
``` |
{
"source": "jorchube/edpath",
"score": 3
} |
#### File: jorchube/edpath/edpath_gui.py
```python
from tkinter import *
import tkinter.messagebox as messagebox
from support.logging import log
import edp_modules.edp_controller as edp_controller
root = Tk(className="EDPath")
systemList = StringVar()
startSystem = StringVar()
endSystem = StringVar()
startSystemCheckState = IntVar()
endSystemCheckState = IntVar()
def _check_arguments():
system_names_list = []
start_system = None
end_system = None
if startSystemCheckState.get() == 1:
start_system = startSystem.get().upper().strip()
if endSystemCheckState.get() == 1:
end_system = endSystem.get().upper().strip()
system_names_list_tmp = systemList.get().split(",")
for system_name in system_names_list_tmp:
system_names_list.append(system_name.strip().upper())
log("system list: {0}".format(system_names_list))
log("start: {0}".format(start_system))
log("end: {0}".format(end_system))
return (system_names_list, start_system, end_system)
def calculateRouteButtonPressed():
system_names_list, start_system_name, end_system_name = _check_arguments()
route = edp_controller.compute_route(system_names_list, start_system_name, end_system_name)
messagebox.showinfo("Computed Route", route.pretty_string())
startSystemCheck = Checkbutton(root, text="Start System", variable=startSystemCheckState, onvalue=1, offvalue=0)
startSystemCheck.pack()
startSystemTextEntry = Entry(root, textvariable=startSystem)
startSystemTextEntry.pack()
endSystemCheck = Checkbutton(root, text="End System", variable=endSystemCheckState, onvalue=1, offvalue=0)
endSystemCheck.pack()
endSystemTextEntry = Entry(root, textvariable=endSystem)
endSystemTextEntry.pack()
systemListLabel = Label(root, text="System List:")
systemListLabel.pack()
systemListTextEntry = Entry(root, textvariable=systemList, width=100)
systemListTextEntry.pack()
calculateRouteButton = Button(None, text="Calculate Route", command=calculateRouteButtonPressed)
calculateRouteButton.pack()
root.mainloop()
```
#### File: edpath/edp_modules/edp_system.py
```python
import math
from support.logging import log
def _distance_between_two_points_3d(x0, y0, z0, x1, y1, z1):
x_component = math.pow((x1 - x0), 2)
y_component = math.pow((y1 - y0), 2)
z_component = math.pow((z1 - z0), 2)
return math.sqrt(x_component + y_component + z_component)
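# Worked example (illustrative, not part of the original module):
assert _distance_between_two_points_3d(0, 0, 0, 3, 4, 12) == 13.0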
class EdpSystem:
def __init__(self, descriptor):
self.name = descriptor['name'].upper()
self.x = descriptor['x']
self.y = descriptor['y']
self.z = descriptor['z']
def distanceTo(self, target):
return _distance_between_two_points_3d(self.x, self.y, self.z, target.x, target.y, target.z)
## TESTS ##
_test_descriptors = [{ 'name': '<NAME>', 'x': 80.90625, 'y': -83.53125, 'z': -30.8125 },
{ 'name': '<NAME>', 'x': 19.78125, 'y': 3.5625, 'z': -153.8125 }]
def _should_create_a_system_object_from_a_dict():
s = EdpSystem(_test_descriptors[1])
assert s.name == _test_descriptors[1]['name'].upper()
assert s.x == _test_descriptors[1]['x']
assert s.y == _test_descriptors[1]['y']
assert s.z == _test_descriptors[1]['z']
def _should_compute_distance_to_another_system():
ref = EdpSystem(_test_descriptors[0])
tgt = EdpSystem(_test_descriptors[1])
distance = ref.distanceTo(tgt)
assert distance < 162.64 and distance > 162.63
def tests():
_should_create_a_system_object_from_a_dict()
_should_compute_distance_to_another_system()
log('edp_system: OK')
``` |
{
"source": "jorcontrerasp/BuscadorCIRepos",
"score": 3
} |
#### File: jorcontrerasp/BuscadorCIRepos/github_search.py
```python
from github import Github
import aux_functions as aux
import ci_yml_parser as ymlp
import dataF_functions as d
import ci_tools as ci
import random
import logging
import calendar
import time
import datetime
import os
import requests
import json
# GitHub search configuration.
config = "github"
queryFile = ymlp.parseConfigParam(config, "queryFile")
filterCommits = ymlp.parseConfigParam(config, "filterCommits")
MIN_COMMITS = ymlp.parseConfigParam(config, "MIN_COMMITS")
MAX_COMMITS = ymlp.parseConfigParam(config, "MAX_COMMITS")
randomizeRepos = ymlp.parseConfigParam(config, "randomizeRepos")
N_RANDOM = ymlp.parseConfigParam(config, "N_RANDOM")
onlyPositives = ymlp.parseConfigParam(config, "onlyPositives")
def authenticate():
# Authenticate and generate a github_token to query the GitHub API through the library.
user = "jorcontrerasp"
token = aux.readFile("tokens/github_token.txt")
g = Github(user, token)
return g
def getGithubRepos(usePickleFile):
fRepos = "github_repos.pickle"
if usePickleFile:
aux.printLog("Utilizando el fichero " + fRepos + " para generar los repositorios GitHub.", logging.INFO)
if os.path.exists(fRepos):
filteredRepos = aux.loadRepositories(fRepos)
else:
raise Exception("No se ha encontrado el fichero pickle en la raíz del proyecto.")
else:
g = authenticate()
q = aux.readFile(queryFile)
#query = mq.mGithubQuery.getQueryIni()
aux.printLog("Ejecutando query: " + queryFile, logging.INFO)
generator = g.search_repositories(query=q)
# Convertimos el generador en una lista de repositorios.
aux.printLog("Generando lista de repositorios GitHub...", logging.INFO)
repositories = list(generator)
# Filtramos por el número de COMMITS.
filteredRepos = []
if filterCommits:
for repo in repositories:
commits = repo.get_commits().totalCount
if commits >= MIN_COMMITS and commits <= MAX_COMMITS:
filteredRepos.append(repo)
else:
for repo in repositories:
filteredRepos.append(repo)
# Select N repositories at random:
lFinal = []
if randomizeRepos:
while len(lFinal) < N_RANDOM:
item = random.choice(filteredRepos)
if item not in lFinal:
lFinal.append(item)
else:
lFinal = filteredRepos
# Save the information of the retrieved repositories in a Python binary (pickle) file.
aux.makePickle(fRepos, lFinal)
lFinal = aux.loadRepositories(fRepos)
# Print the list of repositories
aux.printGitHubRepoList(lFinal)
aux.printLog("Number of repositories: " + str(len(lFinal)), logging.INFO)
return lFinal
def getContents(repo, path):
# Apply the API rate limit control.
doApiRateLimitControl()
# Get the repo contents.
contents = repo.get_contents(path)
return contents
def doApiRateLimitControl():
try:
g = authenticate()
rl = g.get_rate_limit()
rl_core = rl.core
core_remaining = rl_core.remaining
#aux.printLog("API core_remaining: " + str(core_remaining), logging.INFO)
rl_search = rl.search
search_remaining = rl_search.remaining
if core_remaining <= 0:
reset_timestamp = calendar.timegm(rl_core.reset.timetuple())
sleep_time = reset_timestamp - calendar.timegm(time.gmtime()) + 5
print("API rate limit exceded: " + str(sleep_time) + " sleep_time. Waiting...")
time.sleep(sleep_time)
g = authenticate()
except:
aux.printLog("Control del API rate limit exceded NO aplicado...", logging.WARNING)
def searchReposGitHubApi(lRepositories, df, df2, df3, df6):
lFound = []
for repo in lRepositories:
if not onlyPositives and not d.existsDFRecord(repo.full_name, df):
df = d.addDFRecord(repo, df, True)
if d.existsDFRecord(repo.full_name, df):
df = d.initCIYamlColumns(repo.full_name, df)
#found1,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI1, [], df, df2, df3, df6, [])
#found2,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI2, [], df, df2, df3, df6, [])
#found3,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI3, [], df, df2, df3, df6, [])
#found4,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI4, [], df, df2, df3, df6, [])
#found5,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI5, [], df, df2, df3, df6, [])
#found6,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI6, [], df, df2, df3, df6, [])
#found7,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI7, [], df, df2, df3, df6, [])
#found8,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI8, [], df, df2, df3, df6, [])
#found9,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI9, [], df, df2, df3, df6, [])
#found10,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI10, [], df, df2, df3, df6, [])
#found11,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI11, [], df, df2, df3, df6, [])
#found12,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI12, [], df, df2, df3, df6, [])
#found13,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, ci.HerramientasCI.CI13, [], df, df2, df3, df6, [])
found1,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI1, df, df2, df3, df6)
found2,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI2, df, df2, df3, df6)
found3,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI3, df, df2, df3, df6)
found4,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI4, df, df2, df3, df6)
found5,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI5, df, df2, df3, df6)
found6,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI6, df, df2, df3, df6)
found7 = False #found7,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI7, df, df2, df3, df6)
found8,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI8, df, df2, df3, df6)
found9,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI9, df, df2, df3, df6)
found10,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI10, df, df2, df3, df6)
found11,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI11, df, df2, df3, df6)
found12,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI12, df, df2, df3, df6)
found13,df,df3,df6 = searchLiteralPathFromRoot(repo, ci.HerramientasCI.CI13, df, df2, df3, df6)
# If it was found:
# - add it to the list of found repositories.
found = found1 or found2 or found3 or found4 or found5 or found6 or found7 or found8 or found9 or found10 or found11 or found12 or found13
if found:
lFound.append(repo)
df,df2,df4,df5 = d.doAuxWithResultsDF(df, df2, df3, True)
# Generate EXCEL files with the results.
d.makeEXCEL(df, "github/github_results")
d.makeEXCEL(df3, "github/github_languages")
d.makeEXCEL(df4, "github/github_language_statistics")
d.makeEXCEL(df5, "github/github_ci_statistics")
d.makeEXCEL(df6, "github/github_stage_statistics")
return lFound
def searchLiteralPathFromRoot_REC(repo, CITool, literals, df, df2, df3, df6, lStagesProjectAdded):
aux.printLog("Buscando '" + CITool.value + "' en '" + repo.full_name + "'", logging.INFO)
try:
if len(literals)==0:
literals = ci.getCISearchFiles(CITool.value)
path = literals.pop(0)
getContents(repo, path)
if not d.existsDFRecord(repo.full_name, df):
df = d.addDFRecord(repo, df, True)
df = d.updateDataFrameCiColumn(repo, "***", CITool, True, df)
df2 = d.add1CounterDFRecord(CITool.value.lower(), "Encontrados_GitHub", df2)
language = "None"
if len(str(repo.language)) > 0:
language = str(repo.language)
if not d.existsDFRecord(language, df3):
df3 = d.addLanguageDFRecord(language, df3)
df3 = d.add1CounterDFRecord(language.lower(), CITool.value, df3)
# lStagesProjectAdded --> List of 'stages' that have already had a +1 added to the count of projects using them.
ciObjRes = ymlp.getParseObj(repo, path, CITool, True)
if isinstance(ciObjRes, list):
for ciObj in ciObjRes:
str_ciobj = str(ciObj)
if str_ciobj != 'None':
df,df6,lStagesProjectAdded = d.updateDataFrameCiObj(repo, ciObj, True, df, df6, lStagesProjectAdded)
else:
str_ciobj = str(ciObjRes)
if str_ciobj != 'None':
df,df6,lStagesProjectAdded = d.updateDataFrameCiObj(repo, ciObjRes, True, df, df6, lStagesProjectAdded)
return True,df,df3,df6,lStagesProjectAdded
except:
if len(literals)>0:
found,df,df3,df6,lStagesProjectAdded = searchLiteralPathFromRoot_REC(repo, CITool, literals, df, df2,df3,df6,lStagesProjectAdded)
return found,df,df3,df6,lStagesProjectAdded
else:
return False,df,df3,df6,lStagesProjectAdded
def searchLiteralPathFromRoot(repo, CITool, df, df2, df3, df6):
aux.printLog("Buscando '" + CITool.value + "' en '" + repo.full_name + "'", logging.INFO)
lStagesProjectAdded = [] # Lista de 'stages' a los que se les ha hecho un +1 en proyectos que lo utilizan.
literals = ci.getCISearchFiles(CITool.value)
for path in literals:
encontrado = False
try:
c = getContents(repo, path)
encontrado = True
except:
encontrado = False
if encontrado:
if not d.existsDFRecord(repo.full_name, df):
df = d.addDFRecord(repo, df, True)
df = d.updateDataFrameCiColumn(repo, "***", CITool, True, df)
df2 = d.add1CounterDFRecord(CITool.value.lower(), "Encontrados_GitHub", df2)
language = "None"
if len(str(repo.language)) > 0:
language = str(repo.language)
if not d.existsDFRecord(language, df3):
df3 = d.addLanguageDFRecord(language, df3)
df3 = d.add1CounterDFRecord(language.lower(), CITool.value, df3)
ciObjRes = ymlp.getParseObj(repo, path, CITool, True)
if isinstance(ciObjRes, list):
for ciObj in ciObjRes:
str_ciobj = str(ciObj)
if str_ciobj != 'None':
df,df6,lStagesProjectAdded = d.updateDataFrameCiObj(repo, ciObj, True, df, df6, lStagesProjectAdded)
else:
str_ciobj = str(ciObjRes)
if str_ciobj != 'None':
df,df6,lStagesProjectAdded = d.updateDataFrameCiObj(repo, ciObjRes, True, df, df6, lStagesProjectAdded)
return True,df,df3,df6
return False,df,df3,df6
def searchInRepo(repo, literal):
found = False
contents = getContents(repo, "")
while contents:
contentFile = contents.pop(0)
if literal in contentFile.path.lower():
found = True
break
else:
if contentFile.type == "dir":
contents.extend(repo.get_contents(contentFile.path))
return found
def searchInRoot(repo, literal):
found = False
contents = getContents(repo, "")
for contentFile in contents:
if literal in contentFile.path.lower():
found = True
break
return found
def getAllRepoLanguages(languages_url):
languages = []
try:
languages_response = requests.get(languages_url)
text = languages_response.text
loaded_json = json.loads(text)
for l in loaded_json:
languages.append(l)
except:
languages = []
return languages
```
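The path checks above boil down to asking the GitHub API for a literal file path and treating any exception as "CI file not present". A minimal, hedged sketch of that core idea using PyGithub (the helper name `has_ci_file`, the token placeholder and the example repository/path are illustrative and not part of the original project):
```python
from github import Github, GithubException

def has_ci_file(repo, path):
    # True if `path` exists in the repository, False on a 404 or any other API error.
    try:
        repo.get_contents(path)
        return True
    except GithubException:
        return False

# Usage sketch (hypothetical token and repository):
# g = Github("<token>")
# repo = g.get_repo("owner/name")
# print(has_ci_file(repo, ".travis.yml"))
```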
#### File: jorcontrerasp/BuscadorCIRepos/main.py
```python
import dataF_functions as d
import github_search as ghs
import gitlab_search as gls
import aux_functions as aux
import ci_yml_parser as ymlp
import pandas as pd
import os
import logging
# Search process configuration.
config = "process"
execute = ymlp.parseConfigParam(config, "execute")
doGithubSearch = ymlp.parseConfigParam(config, "doGithubSearch")
doGitlabSearch = ymlp.parseConfigParam(config, "doGitlabSearch")
usePickleFile = ymlp.parseConfigParam(config, "usePickleFile")
useResultsExcelFile = ymlp.parseConfigParam(config, "useResultsExcelFile")
def executeProcess():
try:
aux.printLog("Iniciando proceso...", logging.INFO)
fRepos = ""
# Generamos un DataFrame donde irán los contadores.
fCount = "results/counting.xlsx"
if useResultsExcelFile:
if os.path.exists(fCount):
counterDF = pd.read_excel(fCount, index_col=0)
else:
counterDF = d.makeCounterDataFrame()
else:
counterDF = d.makeCounterDataFrame()
if doGithubSearch:
fRepos = "github_repos.pickle"
fResults = "results/github/github_results.xlsx"
fLanguages = "results/github/github_languages.xlsx"
fStageStatistics = "results/github/github_stage_statistics.xlsx"
# Create a DataFrame to hold the results.
if useResultsExcelFile:
if os.path.exists(fResults):
githubDF = pd.read_excel(fResults, index_col=0)
githubLanguagesDF = pd.read_excel(fLanguages, index_col=0)
githubStageStatisticsDF = pd.read_excel(fStageStatistics, index_col=0)
else:
githubDF = d.makeEmptyDataFrame()
githubLanguagesDF = d.makeEmptyLanguageDataFrame()
githubStageStatisticsDF = d.makeEmptyStageStatisticsDataFrame()
else:
githubDF = d.makeEmptyDataFrame()
githubLanguagesDF = d.makeEmptyLanguageDataFrame()
githubStageStatisticsDF = d.makeEmptyStageStatisticsDataFrame()
# Get the list of GitHub repositories.
lFound = ghs.getGithubRepos(usePickleFile)
# Apply the search process.
lResult = []
lResult = ghs.searchReposGitHubApi(lFound, githubDF, counterDF, githubLanguagesDF, githubStageStatisticsDF)
if doGitlabSearch:
fRepos = "gitlab_repos.pickle"
fResults = "results/gitlab/gitlab_results.xlsx"
fLanguages = "results/gitlab/gitlab_languages.xlsx"
fStageStatistics = "results/gitlab/gitlab_stage_statistics.xlsx"
lFound = []
lResult = []
# Create a DataFrame to hold the results.
if useResultsExcelFile:
if os.path.exists(fResults):
gitlabDF = pd.read_excel(fResults, index_col=0)
gitlabLanguagesDF = pd.read_excel(fLanguages, index_col=0)
gitlabStageStatisticsDF = pd.read_excel(fStageStatistics, index_col=0)
else:
gitlabDF = d.makeEmptyDataFrame()
gitlabLanguagesDF = d.makeEmptyLanguageDataFrame()
gitlabStageStatisticsDF = d.makeEmptyStageStatisticsDataFrame()
else:
gitlabDF = d.makeEmptyDataFrame()
gitlabLanguagesDF = d.makeEmptyLanguageDataFrame()
gitlabStageStatisticsDF = d.makeEmptyStageStatisticsDataFrame()
if usePickleFile:
aux.printLog("Utilizando el fichero " + fRepos + " para generar los repositorios GitLab.", logging.INFO)
if os.path.exists(fRepos):
lFound = aux.loadRepositories(fRepos)
# Aplicamos el proceso.
lResult = gls.searchInProjectsGitLabApi(lFound, gitlabDF, counterDF, gitlabLanguagesDF, gitlabStageStatisticsDF)
else:
raise Exception("No se ha encontrado el fichero pickle en la raíz del proyecto.")
else:
lFound,lResult = gls.doSearchGitLabApi(gitlabDF, counterDF, gitlabLanguagesDF, gitlabStageStatisticsDF)
# Generate an EXCEL file with the counters.
d.makeEXCEL(counterDF, "counting")
aux.printLog("Process finished.", logging.INFO)
except:
aux.printLog("An unexpected ERROR occurred.", logging.ERROR)
raise
# END
if execute:
executeProcess()
``` |
{
"source": "jorcus/DAND-Wrangle-OpenStreetMap-Data",
"score": 3
} |
#### File: jorcus/DAND-Wrangle-OpenStreetMap-Data/tags.py
```python
import os
import re
import pprint
import xml.etree.ElementTree as ET
DATASET = "san-jose_california.osm"
PATH = "./"
OSMFILE = PATH + DATASET
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
if element.tag == "tag":
key = element.attrib['k']
if re.search(lower, key):
keys['lower'] += 1
elif re.search(lower_colon, key):
keys['lower_colon'] += 1
elif re.search(problemchars, key):
keys['problemchars'] += 1
else:
keys['other'] += 1
return keys
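# Illustrative examples (not part of the original script): a key such as 'highway' is
# counted under 'lower', 'addr:street' under 'lower_colon', and 'addr.street' under
# 'problemchars' because the '.' matches the problemchars pattern.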
def process_map_tags(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
def test():
keys = process_map_tags(OSMFILE)
pprint.pprint(keys)
if __name__ == '__main__':
test()
``` |
{
"source": "jorcus/Multi-User-Blog",
"score": 2
} |
#### File: jorcus/Multi-User-Blog/model.py
```python
from secret import *
from google.appengine.ext import db
def users_key(group='default'):
return db.Key.from_path('users', group)
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
@classmethod
def by_id(cls, uid):
return User.get_by_id(uid, parent=users_key())
@classmethod
def by_name(cls, name):
u = User.all().filter('name =', name).get()
return u
@classmethod
def register(cls, name, pw, email=None):
pw_hash = make_pw_hash(name, pw)
return User(parent=users_key(),
name=name,
pw_hash=pw_hash,
email=email)
@classmethod
def login(cls, name, pw):
u = cls.by_name(name)
if u and valid_pw(name, pw, u.pw_hash):
return u
class Comment(db.Model):
comment = db.StringProperty(required=True)
post = db.StringProperty(required=True)
creator = db.IntegerProperty(required=True)
creator_name = db.StringProperty(required=True)
@classmethod
def render(self):
return self.render("comment.html", c=self)
``` |
{
"source": "jord9762/jordy9762.github.io",
"score": 4
} |
#### File: Assessment_1/Task4_agents!/agentframework.py
```python
import random
"""Key ideas"""
"""init behaves like a constructor for the class agent. Adding additional arguments after self requires
specification of values otherwise an error will show up claiming positional arguments are missing. Self is automatically provided for the
class. Look here for more info on self: https://www.youtube.com/watch?v=AjYOMk-4NIU"""
#creates the class agent which will be called by our model
class agent:
#object being created
def __init__(self):
self._y = random.randint(0, 99)
self._x = random.randint(0, 99)
# Getters and Setters for x and y properties
def get_x(self):
"""getter for x."""
return self._x
def set_x(self, value):
"""setter for X."""
self._x = value
def get_y(self):
"""getter for Y."""
return self._y
def set_y(self, value):
"""setter for Y."""
self._y = value
def move(self):
"""
Moves the x and y variables or agents using the random library features
-------
None.
"""
if random.random() < 0.5:
self._y = (self._y + 1) % 100
else:
self._y = (self._y - 1) % 100
if random.random() < 0.5:
self._x = (self._x + 1) % 100
else:
self._x = (self._x - 1) % 100
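# Illustrative note (added for clarity): the modulo keeps agents on a 100 x 100 torus,
# e.g. an agent at x == 0 that steps "left" ends up at (0 - 1) % 100 == 99.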
```
#### File: Assessment_1/Task4_agents!/model.py
```python
import random
#allows additional operations such as the itemgetter function
import operator
#allows the presentation of plots,graphs etc.. will be used to display agents
import matplotlib.pyplot
import agentframework
def distance_between(agents_row_a, agents_row_b):
return (((agents_row_a._x - agents_row_b._x)**2) +
((agents_row_a._y - agents_row_b._y)**2))**0.5
num_of_agents = 10
num_of_iterations = 100
agents = []
# Make the agents.
for i in range(num_of_agents):
agents.append(agentframework.agent())
# Move the agents.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
a = agents[i]
print( a.get_y(), a.get_x()) #prints getter and setter for agents in agent framework, I did this to help users understand
#the process occuring throughout the code. The print shows the full 100 iterations for the 10 agents.
matplotlib.pyplot.xlim(0, 99)
matplotlib.pyplot.ylim(0, 99)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i]._x, agents[i]._y)
matplotlib.pyplot.show()
#iterate between all agents to find the distance between.
for agents_row_a in agents:
for agents_row_b in agents:
distance = distance_between(agents_row_a, agents_row_b)
```
#### File: Programming_for_GIA_Core_Skills/Assessment_2/learning.py
```python
import matplotlib
matplotlib.use('TkAgg')
import random
import operator
import csv
import drunkframework
import matplotlib.animation
import matplotlib.pyplot
import matplotlib.backends.backend_tkagg
import tkinter
"""WARNING!!!!!"""
"""Note to visualise the code in this file the code %matplotlib qt must be inputted in to the ipython console first. Or alternatively
the code can be ran in the command prompt. Note to run this code more than once in the Jupyter terminal may require a restart of the kernel."""
"""https://www.youtube.com/watch?v=8exB6Ly3nx0 this excellent resource had info on combining GUI with matplotlib data"""
#creates a new empty list for what will be the csv environment data, see https://docs.python.org/3/library/csv.html for more
environment = []
#drunks adapted from agents from GUI's practical
drunks = []
#specifies number of drunks/agents
num_of_drunks = 25
#outlines the number of iterations the line 64-78 code will undergo
num_of_iterations = 10
#sets the dimensions for the matplotlib plots
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
carry_on = True
def run():
animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
canvas.draw()
#used to call GUI
root = tkinter.Tk()
root.wm_title("Model")
#Open window having dimension 700x700
root.geometry('700x700')
menu_bar = tkinter.Menu(root)
root.config(menu=menu_bar)
#configures GUI background to green
root.configure(background="green")
#adds a close button, use this rather than x to ensure resolution of script
menu_bar.add_command(label="Close", command=root.destroy)
#my_button class and parameters below change the GUI button to blue
my_button = tkinter.Button(root, text="Run model", command=run, bg='blue')#https://pythonexamples.org/python-tkinter-button-background-color/#:~:text=You%20can%20change%20the%20background,bg%20property%20as%20shown%20below.&text=The%20default%20color%20of%20Tkinter%20Button%20is%20grey.
my_button.pack(side=tkinter.TOP)#https://www.youtube.com/watch?v=Uk2FivOD8qo got idea from here
#sets up a canvas for a tkinter GUI where the animated model will be embedded.
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
#Initial check to make sure the CSV is in correct directory
#with open('drunk.txt', newline='') as f:
# reader = csv.reader(f)
#for row in reader:
# print(row)
f = open('drunk.txt', newline='')
#Note that the correct directory must be navigated to in the terminal else the full file path will be needed
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist =[]
for value in row:
rowlist.append(value)
environment.append(rowlist)
f.close()
## Make drunks and assign them with an identification number.
for i in range(num_of_drunks):
identification = ((1+i)*10)
print(identification) #this should print 10-250 giving each of the drunks an identification number, later to be matched up with houses
drunks.append(drunkframework.Drunk(environment, drunks, identification))
def update(frame_number):
fig.clear()
global carry_on
#The following code is adapted from the final piece just to show the user how the drunks
#move, note there is no stopping condition due to the omission of the while loop
for i in range (num_of_drunks):
for j in range(num_of_iterations):
drunks[i].move()
drunks[i].track()
#code on lines 107 to 113 embeds a scatter plot into the tkinter GUI with the environment as a backdrop.
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.imshow(environment)
for i in range(num_of_drunks):
matplotlib.pyplot.scatter(drunks[i]._x, drunks[i]._y)
def gen_function(b = [0]):
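    # generator that feeds frame numbers to FuncAnimation; stops after 100 frames or when carry_on becomes False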
a = 0
global carry_on
while (a < 100) & (carry_on):
#function returns generator
yield a
a = a + 1
tkinter.mainloop()
#Prints out density as a file
with open('density.txt', 'w', newline='') as f:
csvwriter = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
for row in environment:
csvwriter.writerow(row)
``` |
{
"source": "jordan112/ComicTools",
"score": 3
} |
#### File: jordan112/ComicTools/locg.py
```python
from bs4 import BeautifulSoup
import requests
from fuzzywuzzy import fuzz
from time import process_time, strptime
import re
import urllib.parse
import json
import time
import calendar
import datetime
import comicutil
import comicdb
import copy
baseUrl = 'https://leagueofcomicgeeks.com'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def get_issue_by_id(id):
response = requests.get(id,headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
description = ""
try:
description = re.sub('<[^<]+?>', '', soup.find("div", {"class": "col-12 listing-description"}).text).replace("\n","").replace("\r","").replace("\t","")
except:
pass
details_section = soup.find_all("div", {"class": "col-xxl-4 col-lg-6 col-6 mb-3 details-addtl-block"})
page_count = details_section[1].find("div", {"class": "value"}).contents[0].strip().split()[0]
price = details_section[2].find("div", {"class": "value"}).contents[0].strip().split()[0]
creators_section = soup.find_all("div", {"class": "d-flex flex-column align-self-center"})
credits = []
for creator in creators_section:
try:
title = creator.find("div", {"class": "role color-offset copy-really-small"}).contents[0].strip()
for t in title.split(","):
c = {}
title = t.strip()
if "cover" in t.lower():
title = "Cover Artist"
if "story" in t.lower():
title = "Writer"
name = creator.find("div", {"class": "name color-primary font-weight-bold"}).find("a").contents[0].strip()
c["role"] = title
c["person"] = name
if c not in credits:
credits.append(c)
except:
pass
issueObject = {}
issueObject['price'] = price
issueObject['page_count'] = page_count
issueObject['credits'] = credits
issueObject['description'] = description
return issueObject
def get_series_by_id(id):
issue_list = []
#url = f"{baseUrl}/comic/get_comics?addons=1&list=series&list_option=&user_id=0&view=thumbs&format%5B%5D=1&format%5B%5D=5&date_type=&date=&date_end=&series_id={id}&creators=0&character=&title=&order=date-desc&_={calendar.timegm(time.gmtime())}"
url = f"{baseUrl}/comic/get_comics?addons=1&list=series&list_option=&user_id=0&view=thumbs&format%5B%5D=1&date_type=&date=&date_end=&series_id={id}&creators=0&character=&title=&order=date-desc&_={calendar.timegm(time.gmtime())}"
response = requests.get(url,headers=headers)
soup = BeautifulSoup(json.loads(response.text)["list"], 'html.parser')
issues = soup.find_all("li")
for issue in issues:
cover_section = issue.find("div", {"class": "cover"})
issue_link = f'{baseUrl}{cover_section.find("a")["href"]}'
issue_cover = cover_section.find("a").find("img")['data-src']
if "https" not in issue_cover:
issue_cover = f"{baseUrl}{issue_cover}"
issue_name = issue.find("div", {"class": "title color-primary"}).find("a").contents[0].strip()
epoch = issue.find("div", {"class": "details"}).find("span", {"class": "date"})['data-date']
store_date = "0000-00-00"
try:
store_date = datetime.datetime.fromtimestamp(int(epoch)).strftime('%Y-%m-%d')
except:
pass
issueObject = {}
issueObject['coverImage'] = issue_cover
issueObject['issueName'] = issue_name
issueObject['issueNumber'] = comicutil.get_issue_number(issue_name)
issueObject['id'] = issue_link
issueObject['issueLink'] = issue_link
issueObject['storeDate'] = store_date
issue_list.append(issueObject)
return issue_list
def search_series(query,volumeConfidence=0,issueConfidence=0):
results = []
comic_details = comicutil.get_comic_details(query)
#url = f"{baseUrl}/comic/get_comics?addons=1&list=search&list_option=series&user_id=0&view=thumbs&format%5B%5D=1&format%5B%5D=6&format%5B%5D=5&format%5B%5D=2&format%5B%5D=3&format%5B%5D=4&date_type=&date=&date_end=&series_id=0&creators=0&character=0&title={urllib.parse.quote(comic_details.series)}&order=alpha-asc&filterCreator=1&filterCharacter=1&_={calendar.timegm(time.gmtime())}"
url = f"{baseUrl}/comic/get_comics?addons=1&list=search&list_option=series&user_id=0&view=thumbs&format%5B%5D=1&date_type=&date=&date_end=&series_id=0&creators=0&character=0&title={urllib.parse.quote(comic_details.series)}&order=alpha-asc&filterCreator=1&filterCharacter=1&_={calendar.timegm(time.gmtime())}"
try:
response = requests.get(url,headers=headers)
except Exception as e:
return results
soup = BeautifulSoup(json.loads(response.text)["list"], 'html.parser')
all_series = soup.find_all("li")
for series in all_series:
issue_count = series.find("span", {"class": "details count-issues"}).contents[0].strip()
cover_section = series.find("div", {"class": "cover"})
volume_link = f'{baseUrl}{cover_section.find("a", {"class": "link-collection-series"})["href"]}'
series_cover = cover_section.find("a", {"class": "link-collection-series"}).find("img")["data-src"]
series_id = cover_section.find("a", {"class": "link-collection-series"})["data-id"]
publisher = series.find("div", {"class": "publisher color-offset"}).contents[0].strip()
series_name = series.find("div", {"class": "title color-primary"}).find("a").contents[0].strip()
start_year = series.find("div", {"class": "series"})['data-begin']
end_year = series.find("div", {"class": "series"})['data-end']
ratio = fuzz.ratio(series_name,comic_details.series)
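        # fuzzywuzzy similarity score (0-100) between the scraped series name and the query series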
if ratio > volumeConfidence:
series = {}
series["name"] = series_name
series["issue_count"] = issue_count
series["publisher"] = comicdb.map_publisher(publisher)
series["link"] = volume_link
series['year'] = f"{start_year}-{end_year}"
series['start_year'] = start_year
series['volumeYear'] = start_year
series["volumeName"] = series_name
series['end_year'] = end_year
series["confidence"] = ratio
series['id'] = series_id
results.append(series)
results.sort(key=lambda x: int(x['confidence']),reverse = True)
return results
def search_comics(query,volumeConfidence=0,issueConfidence=0):
results = []
comic_details = comicutil.get_comic_details(query)
url = f"{baseUrl}/comic/get_comics?addons=1&list=search&list_option=series&user_id=0&view=thumbs&format%5B%5D=1&format%5B%5D=6&format%5B%5D=5&format%5B%5D=2&format%5B%5D=3&format%5B%5D=4&date_type=&date=&date_end=&series_id=0&creators=0&character=0&title={urllib.parse.quote(comic_details.series)}&order=alpha-asc&filterCreator=1&filterCharacter=1&_={calendar.timegm(time.gmtime())}"
try:
response = requests.get(url,headers=headers)
except Exception as e:
return results
soup = BeautifulSoup(json.loads(response.text)["list"], 'html.parser')
all_series = soup.find_all("li")
for series in all_series:
cover_section = series.find("div", {"class": "cover"})
volume_link = f'{baseUrl}{cover_section.find("a", {"class": "link-collection-series"})["href"]}'
series_cover = cover_section.find("a", {"class": "link-collection-series"}).find("img")["data-src"]
series_id = cover_section.find("a", {"class": "link-collection-series"})["data-id"]
publisher = series.find("div", {"class": "publisher color-offset"}).contents[0].strip()
series_name = series.find("div", {"class": "title color-primary"}).find("a").contents[0].strip()
start_year = series.find("div", {"class": "series"})['data-begin']
end_year = series.find("div", {"class": "series"})['data-end']
ratio = fuzz.ratio(series_name,comic_details.series)
if ratio > volumeConfidence:
series_details = get_series_by_id(series_id)
for issue in series_details:
issue_ratio = fuzz.ratio(issue['issueName'],f"{comic_details.series} #{comic_details.issue}")
issue["volumeName"] = series_name
issue["volumeLink"] = volume_link
issue["publisher"] = comicdb.map_publisher(publisher)
issue['volumeYear'] = start_year
issue['confidence'] = issue_ratio
results.append(issue)
results.sort(key=lambda x: int(x['confidence']),reverse = True)
return results
def search_comics_scrape(query,volumeConfidence=0,issueConfidence=0):
url = '{}/search?keyword={}'.format(baseUrl,urllib.parse.quote(query))
comic_details = comicutil.get_comic_details(query)
response = requests.get(url,headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
volumes = soup.find_all("a", {"class": "link-collection-series"})
searchResults = []
for series in volumes:
if series.contents[0] != "\n":
ratio = fuzz.ratio(series.contents[0].strip(),comic_details.series)
if ratio > volumeConfidence:
url = '{}{}'.format(baseUrl,series['href'])
seriesid = series['data-id']
url = f"{baseUrl}/comic/get_comics?addons=1&list=series&list_option=&user_id=0&view=thumbs&format%5B%5D=1&format%5B%5D=5&date_type=&date=&date_end=&series_id={seriesid}&creators=0&character=&title=&order=date-desc&_={calendar.timegm(time.gmtime())}"
response = requests.get(url,headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
volumeYear = 'UNKNOWN'
try:
volumeYear = comicutil.getYearFromVolume(soup.find("div", {"class": "page-details"}).find("div", {"class": "header-intro"}).contents[2])
except Exception as e:
pass
issues = soup.find("ul", {"id": "comic-list-issues"}).find_all("li")
for issue in issues:
comicTitle = issue.find("div", {"class": "title color-primary"})
description = ''
try:
description = issue.find("div", {"class": "comic-description"}).text
except:
pass
title = comicTitle.find('a').contents[0]
secondratio = fuzz.ratio(f"{comic_details.series} #{comic_details.issue}",comicutil.stripBadChars(title))
if secondratio > issueConfidence:
issueObject = {}
cover_image = issue.find("div", {"class": "cover"}).find('a').find("img")['data-src']
if "medium-" in cover_image:
cover_image = cover_image.replace("medium","large")
issueObject['coverImage'] = cover_image
issueObject['volumeName'] = series.contents[0].strip()
issueObject['issueName'] = title.strip()
issueObject['issueNumber'] = comicutil.get_issue_number(title)
issueObject['issueLink'] = '{}{}'.format(baseUrl,comicTitle.find('a')['href'])
issueObject['volumeLink'] = url
issueObject['publisher'] = comicdb.map_publisher(issue.find("div", {"class": "publisher color-offset"}).contents[0].strip())
issueObject['description'] = description
                        issueObject['storeDate'] = '0000-00-00'
dateArray = issue.find("div", {"class": "details"}).find("span",{"class": "date"}).text.replace(',','').split()
try:
issueObject['storeDate'] = '{}-{}-{}'.format(dateArray[2],strptime(dateArray[0],'%b').tm_mon,dateArray[1][:-2])
except Exception as e:
pass
issueObject['volumeId'] = series['href']
issueObject['issueId'] = comicTitle.find('a')['href']
issueObject['volumeYear'] = volumeYear
#issueObject['diamondSku'] = issue.find("span", {"class": "comic-diamond-sku"}).contents[0].strip()
issueObject['confidence'] = str(secondratio)
searchResults.append(issueObject)
searchResults.sort(key=lambda x: int(x['confidence']),reverse = True)
return searchResults
if __name__ == "__main__":
#results = get_issue_by_id("https://leagueofcomicgeeks.com/comic/1717071/way-of-x-2")
results = get_issue_by_id("https://leagueofcomicgeeks.com/comic/6599986/teenage-mutant-ninja-turtles-the-last-ronin-1")
print(results)
```
#### File: jordan112/ComicTools/main.py
```python
from tkinter import filedialog
from tkinter import *
import copy
import shutil
import sys
import os
import locg
import comicvine
import comicutil
import archiveutil
import imageutil
import config
import comixology
import comicdb
from comicapi import comicinfoxml, filenameparser
SETTINGS = config.get_config()
REMOVE_SCENE_PROMO = SETTINGS["remove_scene_promo"]
REMOVE_SUBFOLDERS = SETTINGS["remove_subfolders"]
REMOVE_COMIXOLOGY_META = SETTINGS["remove_comixology_meta"]
LIBRARY_LOCATION = SETTINGS["library_location"]
KEEP_ORIGINAL = SETTINGS["keep_original"]
COMPARE_COVERS = SETTINGS["compare_covers"]
PDF_ZOOM = SETTINGS['from_pdf_zoom']
ARCHIVE_TYPE = SETTINGS['archive_type']
METADATA_TYPE = SETTINGS['metadata_type']
IMAGE_TYPE = SETTINGS['image_type']
RENAME_TEMPLATE = SETTINGS['rename_template']
COMIC_DATABASE = SETTINGS["comic_database"]
WRITE_METADATA = SETTINGS["write_metadata"] # true, false, overwrite, merge_existing, merge_new
def screen_clear():
# for mac and linux(here, os.name is 'posix')
if os.name == 'posix':
_ = os.system('clear')
else:
        # for windows platform
_ = os.system('cls')
# print out some text
def move_to_library(file,details):
extension = os.path.splitext(file)[1]
new_name = copy.copy(RENAME_TEMPLATE)
seperator = os.path.sep
publisher = details["publisher"]
series = details["volumeName"]
issue_name = details["issueName"]
issue_year = details["storeDate"].split('-')[0]
series_year = details["volumeYear"]
if series_year == "UNKNOWN" or series_year == "PRESENT" or series_year == None or series_year == "":
series_year = issue_year
issue = details['issueNumber']
new_name = new_name.format(seperator=seperator,publisher=publisher,series=series,series_year=series_year,issue_year=issue_year,issue_pad2=comicutil.pad_to_length(issue,2),issue_pad3=comicutil.pad_to_length(issue,3),issue_name=issue_name)
new_name = os.path.join(LIBRARY_LOCATION,f"{new_name}{extension}")
new_name = comicutil.stripBadChars(new_name)
os.makedirs(os.path.dirname(new_name),exist_ok=True)
shutil.move(file,new_name)
print(f"Comic copied to {new_name}")
def convert_to(oldfile,newfile,metadata=None,image_type=IMAGE_TYPE):
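    # Extract the archive to a temp dir, apply the configured clean-up steps (promo removal, image conversion, metadata), then repack it as the requested archive.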
try:
tmp = ""
try:
tmp = archiveutil.extract_to_temp(oldfile,PDF_ZOOM)
except Exception as e:
print("Extract error: ",e)
shutil.rmtree(tmp)
return False
if REMOVE_SCENE_PROMO:
try:
comicutil.remove_promos_from_dir(tmp)
except Exception as e:
print(f"Error removing promos: {e}")
shutil.rmtree(tmp)
return False
if REMOVE_COMIXOLOGY_META:
try:
comicutil.remove_comixology_meta_from_dir(tmp)
except Exception as e:
print(f"Error removing promos: {e}")
shutil.rmtree(tmp)
return False
if IMAGE_TYPE != "" and IMAGE_TYPE != None:
imageutil.convert_dir_to_type(tmp,image_type)
if REMOVE_SUBFOLDERS:
archiveutil.remove_subfolders(tmp)
if metadata is not None:
try:
meta = comicutil.comicdb_to_meta(metadata)
metas = comicutil.get_meta_from_dir(tmp)
for m in comicutil.metadata_files:
if metas[METADATA_TYPE] == "" and WRITE_METADATA:
comicutil.write_meta_to_dir(meta,tmp,METADATA_TYPE)
elif metas[METADATA_TYPE] != "" and WRITE_METADATA == "overwrite":
comicutil.write_meta_to_dir(meta,tmp,METADATA_TYPE)
elif metas[METADATA_TYPE] != "" and WRITE_METADATA:
if METADATA_TYPE == "ComicInfo.xml":
xml1 = comicinfoxml.ComicInfoXml().stringFromMetadata(metas[METADATA_TYPE])
xml2 = comicinfoxml.ComicInfoXml().stringFromMetadata(meta)
xml3 = ""
if WRITE_METADATA == "merge_new":
xml3 = comicutil.merge_meta_xml(xml1,xml2,"xml1")
if WRITE_METADATA == "merge_existing":
xml3 = comicutil.merge_meta_xml(xml1,xml2,"xml2")
new_meta = comicinfoxml.ComicInfoXml().metadataFromString(xml3)
comicutil.write_meta_to_dir(new_meta,tmp,METADATA_TYPE)
except Exception as e:
print(f"Failed to write metadata to directory: {repr(e)}")
try:
return archiveutil.dir_to_archive(tmp,newfile,metadata)
except Exception as e:
print("Archive error: ",e)
shutil.rmtree(tmp)
return False
except Exception as e:
print(f"Convert error: {repr(e)}")
return False
def file_or_folder():
print("--------------------------")
print("File or Folder? Default: 1")
print("--------------------------")
print("1: File")
print("2: Folder")
print("--------------------------")
val = input("") or "1"
if val == "":
return
if val == "1":
Tk().withdraw()
return filedialog.askopenfilename(initialdir = "/",title = "Select file")
if val == "2":
Tk().withdraw()
return filedialog.askdirectory(initialdir="/",title='Select directory')
def convert_to_archive_type():
print("--------------------------")
print("Convert to New Format")
print("Default 1")
print("--------------------------")
print(f"1: Convert to Prefered Type: {ARCHIVE_TYPE.upper()}")
print("2: Convert to CBZ")
print("3: Convert to CBR")
print("4: Convert to PDF")
print("5: Convert to CB7")
print("6: Convert to CBA")
print("7: Convert to CBT")
print("8: Convert to EPUB")
print("--------------------------")
val = input("") or "1"
selected = file_or_folder()
if selected == "" or selected == None:
return
file_types = {"1":ARCHIVE_TYPE.lower(),"2":"cbz","3":"cbr","4":"pdf","5":"cb7","6":"cba","7":"cbt","8":"epub"}
if os.path.isdir(selected):
for subdir, dirs, files in os.walk(selected):
for file in files:
if archiveutil.is_archive(file):
old_file = subdir + os.sep + file
base_file = os.path.splitext(old_file)[0]
new_file = f"{base_file}.{file_types[val]}"
converted = convert_to(old_file,new_file)
print(f"file coverted: {converted}")
if not KEEP_ORIGINAL and converted and old_file != new_file:
os.remove(old_file)
else:
if archiveutil.is_archive(selected):
old_file = selected
base_file = os.path.splitext(old_file)[0]
new_file = f"{base_file}.{file_types[val]}"
converted = convert_to(old_file,new_file)
if not KEEP_ORIGINAL and converted and old_file != new_file:
os.remove(old_file)
def convert_to_image_type():
print("--------------------------")
print("Convert to New Format")
print("Default 1")
print("--------------------------")
print(f"1: Convert to Prefered Type: {IMAGE_TYPE.upper()}")
print("2: Convert to JPG")
print("3: Convert to PNG")
print("4: Convert to WEBP")
print("5: Convert to BMP")
print("6: Convert to GIF")
print("7: Convert to TIFF")
print("--------------------------")
val = input("") or "1"
selected = file_or_folder()
if selected == "" or selected == None:
return
image_types = {"1":IMAGE_TYPE.lower(),"2":"jpg","3":"png","4":"webp","5":"bmp","6":"gif","7":"tiff"}
if os.path.isdir(selected):
for subdir, dirs, files in os.walk(selected):
for file in files:
if archiveutil.is_archive(file):
file = subdir + os.sep + file
                    converted = convert_to(file,file,metadata=None,image_type=image_types[val])
print(f"Image files in {converted} converted to {image_types[val]}")
else:
if archiveutil.is_archive(selected):
file = selected
            converted = convert_to(file,file,metadata=None,image_type=image_types[val])
print(f"Image files in {converted} converted to {image_types[val]}")
def remove_scene_promos():
print("--------------------------")
print("Remove Scene Promos")
print("")
selected = file_or_folder()
if selected == "" or selected == None:
return
if os.path.isdir(selected):
for subdir, dirs, files in os.walk(selected):
for file in files:
filepath = subdir + os.sep + file
if archiveutil.is_archive(filepath):
comicutil.remove_promos_from_file(filepath,PDF_ZOOM)
else:
if archiveutil.is_archive(selected):
comicutil.remove_promos_from_file(selected,PDF_ZOOM)
def set_database():
print("--------------------------")
print("Set Comic Database")
print("Default 1")
print("--------------------------")
print("1: Comicvine")
print("2: League of Comic Geeks")
print("3: Comixology")
print("4: Back to Menu")
print("--------------------------")
val = input("") or "1"
if val == "1":
comicdb.set_database("comicvine")
print("Comic Database set to Comicvine")
if val == "2":
comicdb.set_database("locg")
print("Comic Database set to League of Comic Geeks")
if val == "3":
comicdb.set_database("comixology")
print("Comic Database set to Comixology")
if val == "4" or val.lower() == "q" or val == "quit" or val == "back":
comicdb.set_database("comicvine")
def tag_interactive(filename,results=None,issues=None):
last_series = None
results = results
issues = issues
originalfilename = filename
details = comicutil.get_comic_details(filename)
query = f"{details.series}"
if details.year == None or details.year == "":
details.year = "0000"
if results != None:
last_series = results["last_series"]
results = results["results"]
if results == None:
results = comicdb.search_series(query,50,70)
print("-----------------------------------------------------------------")
print(f"File Name: {originalfilename}")
#print(f"Search Query: {query}")
stop = False
year_range = False
for result in results:
if not stop:
if result["end_year"] == "PRESENT" or result["end_year"] == "UNKNOWN":
year_range = True
elif int(details.year) <= int(result["end_year"]):
year_range = True
if year_range:
val = None
if last_series != details.series:
                    print(f'---------------------- Search Result -----------------------------')
print(f'Series Name: {result["name"]}')
print(f'Year: {result["year"]}')
print(f'Publisher: {result["publisher"]}')
print(f'Issues: {result["issue_count"]}')
print(f'Series Link: {result["link"]}')
print("-----------------------------------------------------------------")
print(f'Name Match Confidence: {result["confidence"]}')
print("-----------------------------------------------------------------")
val = input("Is this the right series? (y/n/q) default (y): ") or "y"
else:
val = "y"
if val.lower() == "y" or val.lower() == "yes":
val = "y"
elif val.lower() == "n" or val.lower() == "no":
val = "n"
elif val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
val = "q"
if val == "y":
if issues == None:
issues = comicdb.get_series_by_id(result["id"])
for issue in issues:
if issue['issueNumber'] == details.issue or details.issue == "":
if "storeDate" not in issue:
issue.update(comicdb.get_issue_by_id(issue["id"]))
extracted = ""
if COMPARE_COVERS:
extracted = archiveutil.extract_to_temp(filename)
#if details.year in issue["storeDate"]:
if True:
print("-----------------------------------------------------------------")
print(f"File Name: {originalfilename}")
                                print(f'---------------------- Search Result -----------------------------')
print(f'Issue Name: {issue["issueName"]}')
print(f'Store Date: {issue["storeDate"]}')
print(f'Issue Link: {issue["issueLink"]}')
print(f'Series Name: {result["name"]}')
print(f'Series Year: {result["year"]}')
print(f'Publisher: {result["publisher"]}')
if COMPARE_COVERS:
webcover = imageutil.getImageFromUrl(issue['coverImage'])
cbcover = comicutil.get_cover_from_dir(extracted)
cover_confidence = imageutil.compare_images2(webcover,cbcover)
print(f'Cover Match Confidence: {cover_confidence}')
print("-----------------------------------------------------------------")
val = input("Rename with these details? (y/n/q) default (y): ") or "y"
if val.lower() == "y" or val.lower() == "yes":
val = "y"
elif val.lower() == "n" or val.lower() == "no":
val = "n"
elif val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
val = "q"
if val == "y":
if "description" not in issue:
issue.update(comicdb.get_issue_by_id(issue["id"]))
file_folder = os.path.dirname(filename)
issue_name = comicutil.stripBadChars(comicutil.remove_issue_number(issue["issueName"]))
issue_year = issue["storeDate"].split("-")[0]
#new_filename = f'{issue_name} #{comicutil.pad_to_length(issue["issueNumber"])} ({issue_year}).{ARCHIVE_TYPE.lower()}'
new_filename = f"temp.{ARCHIVE_TYPE.lower()}"
new_file = os.path.join(file_folder,new_filename)
meta_details = copy.deepcopy(result)
meta_details.update(issue)
converted = convert_to(filename,new_file,meta_details)
if not KEEP_ORIGINAL and converted and filename != new_file:
os.remove(filename)
move_to_library(new_file,meta_details)
result["last_series"] = details.series
results = {"results":results,"last_series":details.series}
return results, issues, ""
break
if val == "n":
pass
if val == "q":
if os.path.isdir(extracted):
shutil.rmtree(extracted)
stop = True
break
if val == "q":
return results, issues, "quit"
if val == "n":
pass
try:
if os.path.isdir(extracted):
shutil.rmtree(extracted)
except:
pass
results = {"results":results,"last_series":details.series}
return results, issues, ""
def search_and_tag_interactive():
print("--------------------------")
print("Get Details and Organize")
print("")
selected = file_or_folder()
if selected == "" or selected == None:
return
if os.path.isdir(selected):
results = None
issues = None
for subdir, dirs, files in os.walk(selected):
for file in files:
filepath = subdir + os.sep + file
if archiveutil.is_archive(filepath):
details = comicutil.get_comic_details(filepath)
if results is not None:
if results["last_series"] != details.series:
results = None
issues = None
r, i, q = tag_interactive(filepath,results,issues)
results = r
issues = i
if q == "quit":
break
else:
if archiveutil.is_archive(selected):
tag_interactive(selected)
def main_menu():
print("============================================================================")
print("What would like to do? default: 1")
print("")
print("1: Tag and organize.")
print("2: Convert archive type.")
print("3: Convert image type.")
print("4: Remove scene promos.")
print("5: Change Comic Database.")
print("6: List Supported Archive Types.")
print("7: Quit.")
print("============================================================================")
val = input("") or "1"
if val == "1":
search_and_tag_interactive()
if val == "2":
convert_to_archive_type()
if val == "3":
convert_to_image_type()
if val == "4":
remove_scene_promos()
if val == "5":
set_database()
if val == "6":
archiveutil.list_supported_formats()
if val == "7" or val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
sys.exit()
if __name__ == "__main__":
while True:
main_menu()
``` |
{
"source": "jordan16ellis/fw-coll-env",
"score": 2
} |
#### File: fw-coll-env/scripts/example.py
```python
import numpy as np
import fw_coll_env
import fw_coll_env_c
def main():
env_c = fw_coll_env_c.FwCollisionEnv(
dt=0.1, max_sim_time=30, done_dist=25, safety_dist=25,
goal1=fw_coll_env_c.Point(200, 0, 0),
goal2=fw_coll_env_c.Point(-200, 0, 0),
time_warp=-1)
avail_actions = fw_coll_env_c.FwAvailActions([15], [-12, 0, 12], [0])
res_lims = np.array([[-200, -200, -np.pi, 0], [200, 200, np.pi, 0]])
goal1_lims = np.array([[200, 0, 0], [200, 0, 0]])
goal2_lims = np.array([[-200, 0, 0], [-200, 0, 0]])
env = fw_coll_env.env.FwCollisionGymEnv(
env_c, res_lims, res_lims, goal1_lims, goal2_lims, avail_actions)
env.reset()
while True:
ac = np.array([0, 0, 0])
obs, rew, done, info = env.step(ac)
env.render()
if done:
env.reset()
if __name__ == '__main__':
main()
```
#### File: fw-coll-env/tests/test_style.py
```python
import subprocess
from pathlib import Path
from typing import List, Any
import pytest
@pytest.fixture
def python_files() -> List[str]:
python_paths = \
(Path(__file__).resolve().parent.parent).rglob('*.py')
return [str(p) for p in python_paths if 'submodules' not in str(p)]
# pylint: disable=redefined-outer-name
def test_flake8(python_files: Any) -> None:
subprocess.check_call(['flake8'] + python_files)
# pylint: disable=redefined-outer-name
def test_pydocstyle(python_files: Any) -> None:
subprocess.check_call(
['pydocstyle'] + python_files + ['--ignore=D1,D203,D213,D416'])
# pylint: disable=redefined-outer-name
def test_pylint(python_files: Any) -> None:
rcfile = str(Path(__file__).resolve().parent / '.pylintrc')
subprocess.check_call(
['pylint'] + python_files +
[f'--rcfile={rcfile}', '--score=no', '--reports=no'])
# pylint: disable=redefined-outer-name
def test_mypy(python_files: Any) -> None:
subprocess.check_call(
['mypy'] + python_files +
['--disallow-untyped-defs', '--ignore-missing-imports',
'--follow-imports', 'silent'])
``` |
{
"source": "Jordan19980601/data-structure",
"score": 4
} |
#### File: Jordan19980601/data-structure/12.11樹狀結構.py
```python
'107153915 tree structure'
btree=[None]*1024
btree[1]='A'
btree[2]='B'
btree[3]='C'
btree[4]='D'
btree[5]='E'
btree[7]='F'
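# array-based binary tree: the root is at index 1 and the children of node p are at indices 2p and 2p+1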
def preorder(p):
    if btree[p]:
        print(btree[p], ' ', end='')
        preorder(2*p)
        preorder(2*p+1)
def inorder(p):
    if btree[p]:
        inorder(2*p)
        print(btree[p], ' ', end='')
        inorder(2*p+1)
def postorder(p):
    if btree[p]:
        postorder(2*p)
        postorder(2*p+1)
        print(btree[p], ' ', end='')
preorder(1)
print()
inorder(1)
print()
postorder(1)
print()
``` |
{
"source": "jordan2lee/gdan-tmp-webdash",
"score": 2
} |
#### File: graph-etl/etl/__init__.py
```python
import sys
import jsonschema
import pkg_resources
from copy import deepcopy
from functools import partial
from dictionaryutils import DataDictionary, load_schemas_from_dir
from etl.gid import vertex_gid
class CustomDataDictionary(DataDictionary):
"""
Modified from:
https://github.com/uc-cdis/dictionaryutils/blob/42bf330d82bf084141c0f21b9815cc7e34bf5287/dictionaryutils/__init__.py#L112
"""
def __init__(
self,
root_dir,
definitions_paths=None,
metaschema_path=None
):
self.root_dir = root_dir
self.metaschema_path = ""
self.definitions_paths = ""
self.exclude = (["_gids.yaml"])
self.schema = dict()
self.resolvers = dict()
self.metaschema = dict()
self.load_data(directory=self.root_dir, url=None)
def load_data(self, directory=None, url=None):
"""Load and reslove all schemas from directory or url"""
yamls, resolvers = load_schemas_from_dir(pkg_resources.resource_filename(__name__, "schema"))
yamls, resolvers = load_schemas_from_dir(directory,
schemas=yamls,
resolvers=resolvers)
self.settings = yamls.get(self.settings_path) or {}
self.resolvers.update(resolvers)
schemas = {
schema["id"]: self.resolve_schema(schema, deepcopy(schema))
for path, schema in yamls.items()
if path not in self.exclude
}
self.schema.update(schemas)
class ClassInstance:
def __init__(self, **kwargs):
self.__dict__["_props"] = {}
if "properties" in self._schema:
for k in self._schema["properties"].keys():
if k in ["label", "backref"]:
continue
elif k in ["gid", "from", "to"]:
k = "_%s" % k
self.__dict__["_props"][k] = None
for k, v in kwargs.items():
if k in ["gid", "from", "to"]:
k = "_%s" % k
self.__setattr__(k, v)
def props(self, preserve_null=False, exclude=[]):
data = {}
for k, v in self._props.items():
if k.startswith("_"):
continue
if k in exclude:
continue
data[k] = v
if not preserve_null:
nulls = [k for k in data if data[k] is None]
for k in nulls:
del data[k]
return data
def schema(self):
return self._schema
def validate(self):
data = deepcopy(self.props())
data["gid"] = self.gid()
data["label"] = self.label()
if "_from" in self._props:
data["from"] = self._from
if "_to" in self._props:
data["to"] = self._to
try:
jsonschema.validate(data, self.schema())
except Exception as e:
print("instance:", data, file=sys.stderr)
print("schema:", self._schema, file=sys.stderr)
raise e
def label(self):
return self._label
def gid(self):
return self._gid
def __repr__(self):
return '<%s(%s)>' % (self.__class__.__name__,
self.props(preserve_null=True))
def __setattr__(self, key, item):
if key == "_label":
raise KeyError("setting _label is not permitted")
if key not in self._props:
raise KeyError("object does not contain key '{}'".format(key))
self._props[key] = item
def __getattr__(self, key):
return self._props[key]
def __getitem__(self, k):
return self.__getattr__(k)
def __setitem__(self, k, v):
return self.__setattr__(k, v)
class Vertex:
pass
class Edge:
pass
_schemaPath = pkg_resources.resource_filename(__name__, "schema")
_schema = CustomDataDictionary(root_dir=_schemaPath)
__all__ = ['Vertex', 'Edge']
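# Dynamically generate a Python class (a Vertex or Edge subclass) for every schema entry in the data dictionary.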
for k, schema in _schema.schema.items():
cls_name = schema["id"]
label = schema['properties']['label']['const']
is_vertex = True
if 'from' in schema['properties'] and 'to' in schema['properties']:
is_vertex = False
if is_vertex:
cls = type(
cls_name,
(ClassInstance, Vertex),
{
'_schema': schema,
'_label': label,
'make_gid': partial(vertex_gid, label)
}
)
else:
cls = type(
cls_name,
(ClassInstance, Edge),
{
'_schema': schema,
'_label': label,
'gid': lambda self: "(%s)--%s->(%s)" % (self._from, self._label, self._to)
}
)
cls._backref = None
cls.backref = lambda self: None
if 'backref' in schema['properties']:
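        # Build a mirrored edge class so the relationship can also be traversed in the reverse direction.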
parts = cls_name.split('_')
if len(parts) != 2:
raise ValueError('Unexpected id format for edge')
backref_name = '%s_%s' % (parts[1], parts[0])
backref = schema['properties']['backref']['const']
backref_schema = deepcopy(schema)
backref_schema['id'] = backref_name
backref_schema['properties']['backref']['const'] = label
backref_schema['properties']['label']['const'] = backref
backref_schema['properties']['from'] = schema['properties']['to']
backref_schema['properties']['to'] = schema['properties']['from']
backref_cls = type(
backref_name,
(ClassInstance, Edge),
{
'_schema': backref_schema,
'_label': backref,
'gid': lambda self: "(%s)--%s->(%s)" % (self._from, self._label, self._to)
}
)
cls._backref = backref_cls
cls.backref = lambda self: self._backref(
_from=self._to,
_to=self._from,
**self.props()
)
backref_cls._backref = cls
backref_cls.backref = lambda self: self._backref(
_from=self._to,
_to=self._from,
**self.props()
)
globals()[backref_name] = backref_cls
__all__.append(backref_name)
globals()[cls_name] = cls
__all__.append(cls_name)
```
#### File: graph-etl/etl/utils.py
```python
import os
def ensure_directory(*args):
path = os.path.join(*args)
if os.path.isfile(path):
raise Exception(
"Emitter output directory %s is a regular file", path)
if not os.path.exists(path):
os.makedirs(path)
```
#### File: graph-etl/transforms/transform_predictions.py
```python
import argparse
import csv
import json
from etl.emitter import new_emitter
from etl import (Sample, Split, Model, Prediction, Cancer, Subtype, FeatureSet,
Sample_Prediction, Split_Prediction, Model_Prediction,
Model_Split, Model_Cancer, Model_FeatureSet)
def transform_one(input_matrix,
emitter_prefix,
emitter_directory="."):
emitter = new_emitter(name="json",
directory=emitter_directory,
prefix=emitter_prefix)
emitted_models = {}
i = 0
with open(input_matrix, "r") as fh:
for line in csv.DictReader(filter(lambda row: row[0] != '#', fh), delimiter="\t"):
sample_id = line["Sample_ID"]
cancer_id = line["Label"].split(":")[0]
assert cancer_id.isupper() # check it looks like a TCGA project code
repeat = line["Repeat"][1:] if line["Repeat"].startswith("R") else line["Repeat"]
fold = line["Fold"][1:] if line["Fold"].startswith("F") else line["Fold"]
split_id = "%s:R%s:F%s" % (cancer_id, repeat, fold)
if i == 0:
for model_id in list(line.keys())[5:]:
parts = model_id.split("|")
if len(parts) != 4:
ValueError("key format incorrect, expected 'unique_classifier_name|feature_set_id|date_stamp|c/p'")
model = Model(
gid=Model.make_gid("%s:%s" % (cancer_id, parts[0])),
)
if model.gid() not in emitted_models:
emitter.emit_vertex(model)
emitter.emit_edge(
Model_Cancer(
_from=model.gid(),
_to=Cancer.make_gid(cancer_id)
),
emit_backref=True
)
feature_set_id = parts[1]
emitter.emit_edge(
Model_FeatureSet(
_from=model.gid(),
_to=FeatureSet.make_gid(feature_set_id)
),
emit_backref=True
)
emitted_models[model.gid()] = True
i = 1
for key, pred_val in line.items():
if key in list(line.keys())[:6]:
continue
parts = key.split("|")
model = Model(
gid=Model.make_gid("%s:%s" % (cancer_id, parts[0])),
)
metadata = None
prediction = None
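                # A "|p" column stores per-subtype probabilities as JSON; the crisp prediction is the subtype with the highest probability.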
if parts[-1] == "p" or pred_val.startswith("{"):
metadata = json.loads(pred_val)
max_prob = 0
for subtype, prob in metadata["classification"].items():
if prob > max_prob:
prediction = subtype
max_prob = prob
else:
prediction = pred_val
prediction = prediction if prediction.startswith(cancer_id) else "%s:%s" % (cancer_id, prediction)
label = line["Label"] if line["Label"].startswith(cancer_id) else "%s:%s" % (cancer_id, line["Label"])
prediction = Prediction(
gid=Prediction.make_gid("%s:%s:%s:%s:%s" % (cancer_id, parts[0], parts[1], split_id, sample_id)),
predicted_value=Subtype.make_gid(prediction),
actual_value=Subtype.make_gid(label),
metadata=metadata,
type="testing" if line["Test"] == "1" else "training",
repeat=int(repeat),
fold=int(fold)
)
emitter.emit_vertex(prediction)
emitter.emit_edge(
Model_Prediction(
_from=model.gid(),
_to=prediction.gid(),
),
emit_backref=True
)
emitter.emit_edge(
Sample_Prediction(
_from=Sample.make_gid(sample_id),
_to=prediction.gid(),
),
emit_backref=True
)
emitter.emit_edge(
Model_Split(
_from=model.gid(),
_to=Split.make_gid(split_id),
),
emit_backref=True
)
emitter.emit_edge(
Split_Prediction(
_from=Split.make_gid(split_id),
_to=prediction.gid(),
),
emit_backref=True
)
emitter.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--input-matrix', '-i',
type=str,
required=True,
help='predictions matrix'
)
parser.add_argument(
'--emitter-prefix', '-p',
type=str,
required=True,
help='emitter prefix'
)
parser.add_argument(
'--emitter-dir', '-d',
type=str,
default='.',
help='emitter prefix'
)
args = parser.parse_args()
transform_one(args.input_matrix, args.emitter_prefix, args.emitter_dir)
```
#### File: gdan-tmp-webdash/reformat/DEV-features-addALL.py
```python
import os
import glob
import argparse
def get_arguments():
parser = argparse.ArgumentParser(description='')
parser.add_argument("-in", "--input", help ="input dir", required=True, type=str)
parser.add_argument("-out", "--output", help ="output dir", required=True, type=str)
return parser.parse_args()
args = get_arguments()
f = args.input # /Users/leejor/Ellrott_Lab/gdan-tmp-webdash/reformat/TESTING2/features_reformatted_gnosis20200408.tsv
ft_output = args.output # /Users/leejor/Ellrott_Lab/gdan-tmp-webdash/reformat/TESTING2_output
# set up file names
outputname = "TESTING2--features_reformatted_gnosis20200408.tsv"
# Iterate through one file
with open(f, 'r') as fh, open(ft_output+"/"+outputname, 'w') as out:
irow = 0
for line in fh:
# Write col headers to out
if irow == 0:
line = line.strip().split('\t')
nmodels = len(line)
print('total models', nmodels)
samp = line[0]
repeat = line[1]
fold = line[2]
            # no reformatting: write the metadata columns through to the output unchanged
out.write(samp + '\t' + repeat + '\t' + fold + "\n")
irow+=1
else:
classifer = line.split('\t')[0]
ftmethod = line.split('\t')[1]
temporal = line.split('\t')[2]
classifer = "ALL:" + classifer #new line
newline = classifer + '\t' + ftmethod + '\t' + temporal
out.write(newline)
print('Created new file: ', outputname)
```
#### File: gdan-tmp-webdash/reformat/reformat-ohsu-preds.py
```python
import json
import pandas as pd
import argparse
def get_arguments():
parser = argparse.ArgumentParser(description='')
parser.add_argument("-in", "--input", help ="name of input file", required=True, type=str)
parser.add_argument("-out", "--output", help ="name of output file", required=True, type=str)
return parser.parse_args()
args = get_arguments()
pred_file = args.input
output_name = args.output
def prob2crisp(name):
"""
    takes one model name and converts the trailing 'p' (probability) marker to 'c' (crisp)
"""
new_model_name = name.split('|')[: -1]
new_model_name.append('c')#add c for crisp
new_model_name = '|'.join(new_model_name)
return new_model_name
def qc_prediction(PREDICTION_C):
import re
# 1. Search and remove class if in string. classACC:2 -> ACC:2
wrong = re.match('class', PREDICTION_C)
if wrong:
PREDICTION_C = re.sub('class', '', PREDICTION_C)
#print(PREDICTION_C)
# 2. Search and replace subtype name. ACC:2 -> ACC:ACC_2
tumor, subtype = re.split(r":", PREDICTION_C)
if tumor not in subtype:
PREDICTION_C = re.sub(subtype, tumor+"_"+subtype, PREDICTION_C)
#print(PREDICTION_C)
return PREDICTION_C
#####
# Read in
#####
# Read in file
print(pred_file)
raw_pred = pd.read_csv(pred_file, sep='\t')
# raw_pred = pd.read_csv(pred_file, skiprows=4178, sep='\t', index_col=0)
#raw_pred
#####
# Create template of new file of crisp predictions
#####
matrix_crisp = raw_pred.iloc[:, :4]
#check format Labels col. ACC:2 -> ACC:ACC_2
tmp = []
for i in raw_pred["Label"]:
if i != qc_prediction(i):
i = qc_prediction(i)
tmp.append(i)
else:
tmp.append(i)
#add Label col to matrix
matrix_crisp['Label']= tmp
#matrix_crisp
######
# Create crisp matrix
######
# create df of just model predictions
df = raw_pred.iloc[:,5:]
models = df.columns
col_ct = 0 # debugging
row_ct = 0 # debugging
for m in models:
#print("###", i, "###")
# get crisp label from probabilites in a given cell
new_col = []
for row in df[m]:
row = qc_prediction(row)
# contents_dict = json.loads(row)
# for k,v in contents_dict.items():
# if k=='classification':
# #print(v)
# subtype_crisp = max(v, key=v.get)
# subtype_crisp = qc_prediction(subtype_crisp)
# #print(subtype_crisp)
# new_col.append(subtype_crisp)
new_col.append(row)
row_ct+=1
#add crisp to matrix_crisp
# i = prob2crisp(m)
# matrix_crisp[i] = new_col
matrix_crisp[m] = new_col
col_ct+=1
#rename col
matrix_crisp=matrix_crisp.rename(columns = {'Unnamed: 0':'Sample_ID'})
# save output
matrix_crisp.to_csv(output_name, sep='\t', index=False)
print('created ', output_name)
``` |
{
"source": "jordan2lee/grip",
"score": 2
} |
#### File: grip/conformance/run_conformance.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import random
import string
import sys
import json
import traceback
from glob import glob
BASE = os.path.dirname(os.path.abspath(__file__))
TESTS = os.path.join(BASE, "tests")
GRIPQL = os.path.join(os.path.dirname(BASE), "gripql", "python")
sys.path.append(GRIPQL)
import gripql # noqa: E402
try:
from importlib.machinery import SourceFileLoader
def load_test_mod(name):
return SourceFileLoader('test.%s' % name, os.path.join(TESTS, name + ".py")).load_module()
except ImportError:
# probably running older python without newer importlib
import imp
def load_test_mod(name):
return imp.load_source('test.%s' % name, os.path.join(TESTS, name + ".py"))
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size)).lower()
class SkipTest(Exception):
pass
class Manager:
def __init__(self, conn, readOnly=False):
self._conn = conn
self.readOnly = readOnly
self.curGraph = ""
self.curName = ""
def newGraph(self):
if self.readOnly is None:
self.curGraph = "test_graph_" + id_generator()
self._conn.addGraph(self.curGraph)
else:
            self.curGraph = self.readOnly
def setGraph(self, name):
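        # Load the named test graph, reusing the current graph if it is already loaded (or the read-only graph when one is configured).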
if self.readOnly is not None:
return self._conn.graph(self.readOnly)
if self.curName == name:
return self._conn.graph(self.curGraph)
if self.curGraph != "":
self.clean()
self.curGraph = "test_graph_" + id_generator()
self._conn.addGraph(self.curGraph)
G = self._conn.graph(self.curGraph)
with open(os.path.join(BASE, "graphs", "%s.vertices" % (name))) as handle:
for line in handle:
data = json.loads(line)
G.addVertex(data["gid"], data["label"], data.get("data", {}))
with open(os.path.join(BASE, "graphs", "%s.edges" % (name))) as handle:
for line in handle:
data = json.loads(line)
G.addEdge(src=data["from"], dst=data["to"],
gid=data.get("gid", None), label=data["label"],
data=data.get("data", {}))
self.curName = name
return G
def clean(self):
if self.readOnly is None:
self._conn.deleteGraph(self.curGraph)
def writeTest(self):
if self.readOnly is not None:
raise SkipTest
self.clean()
self.curName = ""
self.curGraph = "test_graph_" + id_generator()
self._conn.addGraph(self.curGraph)
G = self._conn.graph(self.curGraph)
return G
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"server",
type=str,
help="GRIP server url"
)
parser.add_argument(
"tests",
type=str,
nargs='*',
help="conformance test suite(s) to run"
)
parser.add_argument(
"--exclude",
"-e",
type=str,
nargs="+",
default=[],
help="Exclude test suite(s)"
)
parser.add_argument(
"--methods",
"-m",
type=str,
nargs="+",
default=[],
help="Unit Test Methods"
)
parser.add_argument(
"--readOnly",
"-r",
default=None
)
args = parser.parse_args()
server = args.server
if len(args.tests) > 0:
tests = ["ot_" + t for t in args.tests]
else:
tests = [os.path.basename(a)[:-3] for a in glob(os.path.join(TESTS, "ot_*.py"))]
# filter out excluded tests
tests = [t for t in tests if t[3:] not in args.exclude]
conn = gripql.Connection(server)
correct = 0
total = 0
manager = Manager(conn, args.readOnly)
for name in tests:
mod = load_test_mod(name)
for f in dir(mod):
if f.startswith("test_"):
func = getattr(mod, f)
if callable(func):
if len(args.methods) == 0 or f[5:] in args.methods:
try:
print("Running: %s %s " % (name, f[5:]))
try:
e = func(manager)
except SkipTest:
continue
if len(e) == 0:
correct += 1
print("Passed: %s %s " % (name, f[5:]))
else:
print("Failed: %s %s " % (name, f[5:]))
for i in e:
print("\t- %s" % (i))
except Exception as e:
print("Crashed: %s %s %s" % (name, f[5:], e))
traceback.print_exc()
total += 1
manager.clean()
print("Passed %s out of %s" % (correct, total))
if correct != total:
sys.exit(1)
```
#### File: conformance/tests/ot_keycheck.py
```python
def test_subkey(man):
"""
Bug in KVGraph scanned edge index prefixes, if key was a prefix subkey of another,
edge sets would get merged (ie get outgoing from 'Work' and get edges from 'Work' and 'Workflow')
"""
errors = []
G = man.writeTest()
G.addVertex("Work", "Thing", {})
G.addVertex("Workflow", "Thing", {})
G.addVertex("Other", "Thing", {})
G.addVertex("OtherGuy", "Thing", {})
G.addEdge("Work", "Other", "edge")
G.addEdge("Workflow", "OtherGuy", "edge")
count = 0
for i in G.query().V("Work").out():
count += 1
if count != 1:
errors.append("Incorrect outgoing vertex count %d != %d" % (count, 1))
count = 0
for i in G.query().V("Work").outE():
count += 1
if count != 1:
errors.append("Incorrect outgoing edge count %d != %d" % (count, 1))
count = 0
for i in G.query().V("Other").inE():
count += 1
if count != 1:
errors.append("Incorrect incoming edge count %d != %d" % (count, 1))
return errors
```
#### File: conformance/tests/ot_labels.py
```python
def test_list_labels(man):
errors = []
G = man.setGraph("swapi")
resp = G.listLabels()
print(resp)
if len(resp["vertex_labels"]) != 6:
errors.append("listLabels returned an unexpected number of vertex labels; %d != 2" % (len(resp["vertex_labels"])))
if sorted(resp["vertex_labels"]) != ["Character", "Film", "Planet", "Species", "Starship", "Vehicle"]:
errors.append("listLabels returned unexpected vertex labels")
if len(resp["edge_labels"]) != 10:
errors.append("listLabels returned an unexpected number of edge labels; %d != 10" % (len(resp["edge_labels"])))
if sorted(resp["edge_labels"]) != ["characters", "films", "homeworld", "people", "pilots", "planets", "residents", "species", "starships", "vehicles"]:
errors.append("listLabels returned unexpected edge labels")
return errors
```
#### File: gripper/test-graph/make-table-graph.py
```python
import os
import sys
import json
"""
make-table-graph.py
This script takes a vertex file and an edge file and translates them into a series of
tables that could be loaded into a variety of data systems to test the dig driver
"""
def graph2tables(vFile, eFile, ePlanFile):
tables = {}
vTypes = {}
with open(vFile) as handle:
for line in handle:
data = json.loads(line)
label = data['label']
if label not in tables:
tables[label] = []
tables[label].append(data)
vTypes[data['gid']] = data['label']
eTable = []
with open(eFile) as handle:
for line in handle:
data = json.loads(line)
eTable.append(data)
ePlan = []
with open(ePlanFile) as handle:
for line in handle:
data = json.loads(line)
ePlan.append(data)
for plan in ePlan:
if plan['mode'] == "edgeTable":
o = []
for e in eTable:
if vTypes[e['to']] == plan['to'] and vTypes[e['from']] == plan['from'] and e['label'] == plan['label']:
f = e['from'].split(":")[1]
t = e['to'].split(":")[1]
d = e.get("data", {})
d["from"] = f
d["to"] = t
                    o.append( d )
            tables[plan['name']] = o
        elif plan['mode'] == 'fieldToID':
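            # fieldToID: instead of a join table, write the target row id into a field on the source vertex rows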
for e in eTable:
if vTypes[e['to']] == plan['to'] and vTypes[e['from']] == plan['from'] and e['label'] == plan['label']:
#print("add %s to %s" % (e['to'], e['from']))
for v in tables[vTypes[e['from']]]:
if v['gid'] == e['from']:
dstID = e['to'].split(":")[1]
v['data'][ plan['field'] ] = dstID
else:
raise Exception("Unknown edge mode")
return tables
def keyUnion(a):
    o = set()
    for i in a:
        o.update(i)
    return list(o)
if __name__ == "__main__":
vFile = sys.argv[1]
eFile = sys.argv[2]
ePlanFile = sys.argv[3]
outdir = sys.argv[4]
tables = graph2tables(vFile, eFile, ePlanFile)
#print(tables)
with open(os.path.join(outdir, "table.map"), "w") as tHandle:
for name, rows in tables.items():
p = os.path.join(outdir, "%s.tsv" % (name))
with open(p, "w") as handle:
if 'data' in rows[0] and 'gid' in rows[0]:
headers = keyUnion( list(list(r['data'].keys()) for r in rows) )
handle.write("\t".join(['id'] + headers) + "\n")
for row in rows:
id = row['gid'].split(":")[1]
handle.write("\t".join( [json.dumps(id)] + list( json.dumps(row['data'].get(k,"")) for k in headers ) ) + "\n")
else:
headers = list(rows[0].keys())
handle.write("\t".join(headers) + "\n")
for row in rows:
handle.write("\t".join(list( json.dumps(row[k]) for k in headers )) + "\n")
tHandle.write("%s\t%s.tsv\n" % (name, name))
```
#### File: gripper/test-graph/table-server.py
```python
import os
import re
import sys
import json
import grpc
import gripper_pb2
import gripper_pb2_grpc
from google.protobuf import json_format
from concurrent import futures
def keyUnion(a):
    o = set()
    for i in a:
        o.update(i)
    return list(o)
class CollectionServicer(gripper_pb2_grpc.DigSourceServicer):
def __init__(self, data):
self.data = data
def GetCollections(self, request, context):
for i in self.data:
o = gripper_pb2.Collection()
o.name = i
yield o
def GetCollectionInfo(self, request, context):
o = gripper_pb2.CollectionInfo()
c = self.data[request.name]
for f in keyUnion(i.keys() for i in c.values()):
o.search_fields.append( "$." + f)
return o
def GetIDs(self, request, context):
for k in self.data[request.name]:
o = gripper_pb2.RowID()
o.id = k
yield o
def GetRows(self, request, context):
for k,v in sorted(self.data[request.name].items()):
o = gripper_pb2.Row()
o.id = k
json_format.ParseDict(v, o.data)
yield o
def GetRowsByID(self, request_iterator, context):
for req in request_iterator:
d = self.data[req.collection][req.id]
o = gripper_pb2.Row()
o.id = req.id
o.requestID = req.requestID
json_format.ParseDict(d, o.data)
yield o
def GetRowsByField(self, request, context):
c = self.data[request.collection]
f = re.sub( r'^\$\.', '', request.field)
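        # strip the leading "$." JSONPath prefix to get the plain field name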
for k, v in sorted(c.items()):
if v.get(f, None) == request.value:
o = gripper_pb2.Row()
o.id = k
json_format.ParseDict(v, o.data)
yield o
def serve(port, data):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=100))
gripper_pb2_grpc.add_DigSourceServicer_to_server(
CollectionServicer(data), server)
server.add_insecure_port('[::]:%s' % port)
server.start()
print("Serving: %s" % (port))
server.wait_for_termination()
if __name__ == "__main__":
tableMap = {}
dataMap = {}
with open(sys.argv[1]) as handle:
for line in handle:
row = line.rstrip().split("\t")
name = row[0]
path = os.path.join( os.path.dirname(os.path.abspath(sys.argv[1]) ), row[1] )
data = {}
with open(path) as h:
header = None
for l in h:
r = l.rstrip().split("\t")
if header is None:
header = r
else:
j = list(json.loads(i) for i in r)
d = dict(zip(header,j))
if 'id' in d:
data[str(d['id'])] = d
else:
data[str(len(data))] = d
dataMap[name] = data
serve(50051, dataMap)
```
#### File: tests/unit/header_test.py
```python
import json
import os
import tempfile
import unittest
from gripql import Connection
from gripql.util import BaseConnection
def headersOverlap(actual, expected):
for k, v in expected.items():
assert k in actual
assert actual[k] == v
class TestRequestHeaderFormat(unittest.TestCase):
mock_url = "http://fakehost:8000"
def test_connection(self):
b = BaseConnection(self.mock_url)
headersOverlap(b.session.headers, {'Content-type': 'application/json'})
b = BaseConnection(self.mock_url, user="test", password="password")
headersOverlap(b.session.headers, {'Content-type': 'application/json', 'Authorization': 'Basic dGVzdDpwYXNzd29yZA=='})
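        # "dGVzdDpwYXNzd29yZA==" is base64("test:password")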
b = BaseConnection(self.mock_url, token="<PASSWORD>")
headersOverlap(b.session.headers, {'Content-type': 'application/json', 'Authorization': 'Bearer iamnotarealtoken'})
creds = {"OauthEmail": "<EMAIL>",
"OauthAccessToken": "<PASSWORD>",
"OauthExpires": 1551985931}
tmp = tempfile.NamedTemporaryFile(mode="w", delete=False)
json.dump(creds, tmp)
tmp.close()
expected = creds.copy()
expected["OauthExpires"] = str(expected["OauthExpires"])
expected["Content-type"] = "application/json"
b = BaseConnection(self.mock_url, credential_file=tmp.name)
os.remove(tmp.name)
headersOverlap(b.session.headers, expected)
# test header propagation to Graph and Query classes
c = Connection(self.mock_url, token="<PASSWORD>")
self.assertEqual(c.session.headers, c.graph('test').session.headers)
self.assertEqual(c.session.headers, c.graph('test').query().session.headers)
``` |
{
"source": "jordan31bit/Simp_PWIbot",
"score": 3
} |
#### File: jordan31bit/Simp_PWIbot/apiDiscord.py
```python
from distutils.command.clean import clean
import discord
import commands
import clean_reponse
com = commands.Commands
client = discord.Client()
async def pingResponseBuilder(testing) :
embed = discord.Embed()
embed.title="PWI Server Check"
embed.description="Lists ping, thus signifies that server is up or down "
embed.add_field(name="Server", value="Tideswell \n Etherblade \n Twilight Temple \n DawnGlory", inline=True)
embed.add_field(name="Latency", value=testing, inline=True)
return(embed)
async def determineWhat(list) :
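    # list[4] carries the command code produced by commands.whichCommand: 1 = server ping, 2 = blessing codes, 3 = price check, 0 = help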
if list[4] == 1 :
#send to cleaners get str back of latency
clean = clean_reponse.Handle_Responses
list.pop(4)
foo = ""
foo = clean.cleanServerResp(list)
titleBuilder = "PWI Server Check"
descriptionBuilder = "Lists ping, thus signifies if server is up or down."
fieldName1 = "Server"
fieldValue1 = "Tideswell \n Etherblade \n Twilight Temple \n DawnGlory"
fieldName2 = "Latency"
embed = await genericBuilder(titleBuilder, descriptionBuilder, fieldName1, fieldValue1, fieldName2, foo)
return(embed)
elif list[4] == 2 :
titleBuilder = "PWI Blessings Codes"
descriptionBuilder = "Jones' Blessing = 45 Att levels & O'malley's Blessing = 15 Att and Def levels."
fieldName1 = "Jone's Blessing"
fieldValue1 = 'fkNu6Sni'
fieldName2 = "O'malley's Blessing"
fieldValue2 = 'dRp86qzN'
embed = await genericBuilder(titleBuilder, descriptionBuilder, fieldName1, fieldValue1, fieldName2, fieldValue2)
return(embed)
elif list[4] == 3 :
titleBuilder = "PWI Price Checking"
descriptionBuilder = "This will query pwcats in order to perform search and obtain needed info."
fieldName1 = "Item"
fieldValue1 = 'item1'
fieldName2 = "Price"
fieldValue2 = '1,000,000.00' + ' gold coins'
embed = await genericBuilder(titleBuilder, descriptionBuilder, fieldName1, fieldValue1, fieldName2, fieldValue2)
return(embed)
elif list[4] == 0 :
titleBuilder = "Simp PWI Bot Help"
descriptionBuilder = "Below are the available commands currently. Also, check out my Git repo https://git.sr.ht/~jordan31/simp_pwi_bot"
fieldName1 = "Commands"
fieldValue1 = 'help'
fieldValue2 = 'ping'
fieldValue3 = 'price'
fieldName2 = "Description"
fieldValue4 = 'Displays this menu.'
fieldValue5 = 'Checks to see if servers are online, then returns the latency.'
fieldValue6 = 'Searches PWCats and returns the item info you searched for.'
fieldValue7 = 'Displays the Blessings codes from PWI.'
fieldValue8 = 'codes'
embed = await helpMenueBuilder(titleBuilder, descriptionBuilder, fieldName1, fieldName2, fieldValue1, fieldValue2, fieldValue3, fieldValue4, fieldValue5, fieldValue6, fieldValue7, fieldValue8)
return(embed)
async def genericBuilder(titleBuilder, descriptionBuilder, fieldName1, fieldValue1, fieldName2, latency) :
embed = discord.Embed()
embed.title=titleBuilder
embed.description=descriptionBuilder
embed.add_field(name=fieldName1, value=fieldValue1, inline=True)
embed.add_field(name=fieldName2, value=latency, inline=True)
return(embed)
async def helpMenueBuilder(titleBuilder, descriptionBuilder, fieldName1, fieldName2, fieldValue1, fieldValue2, fieldValue3, fieldValue4, fieldValue5, fieldValue6, fieldValue7,fieldValue8) :
tmp = fieldValue1+'\n'+fieldValue2+'\n'+fieldValue3+'\n'+fieldValue8
tmp2 = fieldValue4+'\n'+fieldValue5+'\n'+fieldValue6+'\n'+fieldValue7
embed = discord.Embed()
embed.title=titleBuilder
embed.description=descriptionBuilder
embed.add_field(name=fieldName1, value=tmp, inline=True)
embed.add_field(name=fieldName2, value=tmp2, inline=True)
return(embed)
class APIDiscord(discord.Client):
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('pwi'):
tmp = await com.whichCommand(message.content)
poop = await determineWhat(tmp)
#poop = await genericBuilder(tmp)
await message.channel.send(embed=poop)
client.run('TOKEN HERE')
```
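`determineWhat` above keys off the fifth element of the list produced by `commands.Commands.whichCommand` (0 = help, 1 = ping, 2 = blessing codes, 3 = price). That helper is not shown here, so the sketch below only illustrates the convention with a hypothetical stand-in parser:
```python
# opcode convention inferred from determineWhat (not the real whichCommand)
OPCODES = {"help": 0, "ping": 1, "codes": 2, "price": 3}

def which_opcode(message: str) -> int:
    # hypothetical stand-in for commands.Commands.whichCommand
    words = message.lower().replace("pwi", "", 1).split()
    return OPCODES.get(words[0], 0) if words else 0

assert which_opcode("pwi ping") == 1
assert which_opcode("pwi codes") == 2
```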
#### File: jordan31bit/Simp_PWIbot/query_server.py
```python
from ctypes import sizeof
import socket
import time
import math
import clean_reponse
server = ["pwieast2.perfectworld.com", "pwiwest4.perfectworld.com", "pwigc2.perfectworld.com", "pwieu3.en.perfectworld.eu"]
port = 29000
class MyClass:
def bob(foo):
print(foo)
return
async def pingServer():
latency = []
try:
for x in server :
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as soc:
try :
startTime = time.time()
soc.connect((x, port))
response = soc.recv(1024)
endTime = time.time()
totalTime = endTime - startTime
latency.append(totalTime * 1000)
print(response)
soc.close()
except :
print(x+" is down 🔴")
latency.append("🔴")
except:
soc.close()
return("Error: I broke :(")
soc.close()
latency.append(1)
# clean = clean_reponse.Handle_Responses
# latency = clean.cleanServerResp(latency)
return(latency)
async def getBlessings() :
# return a list of mixed data types
return
async def autoCheckLatency() :
print("I'm in here!!!!!")
state = 0 # 0 assumed up, 1 server is up, 2 server is down
tmp = state
qs = MyClass
list = await qs.pingServer()
for x in list :
if isinstance(x, str) :  # a "🔴" marker means that server is down
tmp = tmp + 1
if state != tmp :
return(list)
else :
time.sleep(3)
else :
tmp = state
time.sleep(3)
``` |
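`pingServer` above times a TCP connect (plus one `recv`) against each game server and records a red-circle marker when the connect fails. A simplified, standalone sketch of that measurement (connect time only):
```python
import socket, time

def tcp_latency_ms(host: str, port: int, timeout: float = 3.0):
    """Return the TCP connect time in milliseconds, or None if unreachable."""
    start = time.time()
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return (time.time() - start) * 1000.0
    except OSError:
        return None

# e.g. tcp_latency_ms("pwieast2.perfectworld.com", 29000)
```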
{
"source": "Jordan396/Twitter-Sentiment-Analysis",
"score": 3
} |
#### File: Jordan396/Twitter-Sentiment-Analysis/main_TextBlob.py
```python
import tweetProcesser
import tweetCleaner
import csv
from textblob import TextBlob
def TextBlobCleanRaw():
'''
Raw TextBlob model
Our current model uses the pre-cleaned raw TextBlob model.
'''
tweet_counter = 0
with open("results_textblob_raw.txt","w",encoding="utf-8") as preresults:
newWriter = csv.writer(preresults, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
with open("raw_twitter.txt","r",encoding="utf-8") as preproccessed:
for line in preproccessed.readlines():
tweet_counter += 1
try:
print("Processing tweet: {}".format(tweet_counter))
tweet = tweetCleaner.lowercase(line)
tweet = tweetCleaner.StopWordRemover(tweet)
tweet = tweetCleaner.removeSpecialChars(tweet)
tweet = tweetCleaner.removeAllNonAlpha(tweet)
tweet = tweetCleaner.lemmatizer(tweet)
wiki = TextBlob(tweet)
normalized_score, sentiment_label = tweetProcesser.sentimentClassifier(wiki, 0)
newWriter.writerow([normalized_score, sentiment_label,tweet])
except:
newWriter.writerow(["0","neutral", "BLANK"])
print("ERROR processing tweet: {}".format(tweet_counter))
def TextBlobCleanAbbrev():
'''
TextBlob model with abbreviations extended.
'''
tweet_counter = 0
tweetProcesser.abbreviation_extender()
with open("results_textblob_abbrev.txt","w",encoding="utf-8") as postresults:
newWriter = csv.writer(postresults, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
with open("abbreviations_twitter.txt","r",encoding="utf-8") as postprocessed:
for line in postprocessed.readlines():
tweet_counter += 1
try:
print("Processing tweet: {}".format(tweet_counter))
tweet = tweetCleaner.StopWordRemover(line)
tweet = tweetCleaner.removeSpecialChars(tweet)
tweet = tweetCleaner.removeAllNonAlpha(tweet)
tweet = tweetCleaner.lemmatizer(tweet)
wiki = TextBlob(tweet)
normalized_score, sentiment_label = tweetProcesser.sentimentClassifier(wiki, 0)
newWriter.writerow([normalized_score, sentiment_label, tweet])
except:
newWriter.writerow(["0","neutral", "BLANK"])
print("ERROR processing tweet: {}".format(tweet_counter))
def TextBlobCleanEmoji():
'''
TextBlob model with Emoticon scoring.
'''
tweet_counter = 0
with open("results_textblob_emoji.txt","w",encoding="utf-8") as preresults:
newWriter = csv.writer(preresults, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
with open("raw_twitter.txt","r",encoding="utf-8") as preproccessed:
for line in preproccessed.readlines():
tweet_counter += 1
try:
print("Processing tweet: {}".format(tweet_counter))
tweet = tweetCleaner.lowercase(line)
tweet = tweetCleaner.StopWordRemover(tweet)
tweet = tweetCleaner.removeSpecialChars(tweet)
tweet,score = tweetProcesser.emoticon_score(tweet)
tweet = tweetCleaner.removeAllNonAlpha(tweet)
tweet = tweetCleaner.lemmatizer(tweet)
wiki = TextBlob(tweet)
normalized_score, sentiment_label = tweetProcesser.sentimentClassifier(wiki, score)
newWriter.writerow([normalized_score, sentiment_label, tweet])
except:
newWriter.writerow(["0","neutral", "ERROR"])
print("ERROR processing tweet: {}".format(tweet_counter))
def TextBlobCleanAbbrevEmoji():
'''
TextBlob model with Emoticon scoring and extended abbreviations.
'''
tweet_counter = 0
tweetProcesser.abbreviation_extender()
with open("results_textblob_abbrev_emoji.txt","w",encoding="utf-8") as preresults:
newWriter = csv.writer(preresults, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
with open("abbreviations_twitter.txt","r",encoding="utf-8") as preproccessed:
for line in preproccessed.readlines():
tweet_counter += 1
try:
print("Processing tweet: {}".format(tweet_counter))
tweet = tweetCleaner.StopWordRemover(line)
tweet = tweetCleaner.removeSpecialChars(tweet)
tweet,score = tweetProcesser.emoticon_score(tweet)
tweet = tweetCleaner.removeAllNonAlpha(tweet)
tweet = tweetCleaner.lemmatizer(tweet)
wiki = TextBlob(tweet)
normalized_score, sentiment_label = tweetProcesser.sentimentClassifier(wiki, score)
newWriter.writerow([normalized_score, sentiment_label, tweet])
except:
newWriter.writerow(["0","neutral", "ERROR"])
print("ERROR processing tweet: {}".format(tweet_counter))
print("====================TEST BEGIN=======================")
'''
BASIC: This is the main function we will be executing.
It combines all the cleaning and processing steps described in the GitHub README.
Run this script in your python command shell.
'''
TextBlobCleanAbbrevEmoji()
'''
ADVANCED: Sometimes, performing excessive cleaning operations on the input may worsen the accuracy of the model.
Hence, here are several other models you may wish to test for accuracy comparison.
The description of the models may be found under the individual functions above.
To test a model, simply comment the above "Basic" model and uncomment any of the models below.
Run this script in your python command shell.
'''
#TextBlobCleanRaw()
#TextBlobCleanAbbrev()
#TextBlobCleanEmoji()
print("====================TEST END=========================")
``` |
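Each variant above ends by handing the cleaned tweet to `TextBlob` and letting `tweetProcesser.sentimentClassifier` turn the result into a score and label. That classifier is not part of this file, so the thresholds below are illustrative only; they just show the underlying TextBlob polarity signal:
```python
from textblob import TextBlob

def label_polarity(text: str, emoticon_score: float = 0.0):
    # TextBlob polarity is in [-1.0, 1.0]; the real sentimentClassifier may
    # combine and threshold differently.
    score = TextBlob(text).sentiment.polarity + emoticon_score
    if score > 0:
        return score, "positive"
    if score < 0:
        return score, "negative"
    return score, "neutral"

print(label_polarity("this phone is great"))
```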
{
"source": "Jordan886/gitlab-open-merge-request-resource",
"score": 3
} |
#### File: gitlab-open-merge-request-resource/src/load_json.py
```python
import json
class LoadJson:
def __init__(self, payload):
self.data = json.load(payload)
self.source = self.data['source']
try:
self.api_url = self.source['api_url']
self.access_token = self.source['access_token']
self.project = self.source['project']
self.branch = self.source['branch']
except:
raise Exception("Missing required source config")
# check if also params exist
if 'params' in self.data :
self.params = self.data['params']
try:
self.target_branch = self.params['target_branch']
# Optional params
if 'delete_source' in self.params:
self.delete_source_branch = self.params['delete_source']
except:
raise Exception("Missing parameters")
# version is passed in get and put steps
if 'version' in self.data :
self.version = self.data['version']
try:
self.id = self.version['iid']
except:
raise Exception("Version not set")
``` |
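Judging by the fields `LoadJson` reads, the payload is a Concourse-style resource document. A minimal example that satisfies the required and optional blocks (all values below are placeholders):
```python
import io, json

payload = {
    "source": {                       # required block
        "api_url": "https://gitlab.example.com/api/v4",
        "access_token": "REDACTED",
        "project": "group/repo",
        "branch": "feature/foo",
    },
    "params": {                       # optional, used when merging
        "target_branch": "main",
        "delete_source": True,
    },
    "version": {"iid": 42},           # optional, present on get/put
}
# LoadJson(io.StringIO(json.dumps(payload)))  # json.load accepts any file-like object
```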
{
"source": "jordan9001/asmplay",
"score": 2
} |
#### File: jordan9001/asmplay/asmline.py
```python
import capstone
# pip3 install keystone-engine
import keystone
haveunicorn = False
try:
import unicorn
haveunicorn = True
except ModuleNotFoundError:
pass
try:
import readline
except ModuleNotFoundError:
pass
AP_MODE_ASM = "asm"
AP_MODE_DIS = "dis"
AP_MODE_EMU = "emu"
AP_MODES = [AP_MODE_ASM, AP_MODE_DIS, AP_MODE_EMU]
AP_ARCH_X86 = "x86"
AP_ARCH_ARM = "arm"
AP_ARCH_ARM64 = "arm64"
AP_ARCHS = [AP_ARCH_X86, AP_ARCH_ARM, AP_ARCH_ARM64]
AP_BITS_16 = "16"
AP_BITS_32 = "32"
AP_BITS_64 = "64"
AP_BITS_ARM = "arm"
AP_BITS_ARMBE = "arm_be"
AP_BITS_THUMB = "thumb"
AP_BITS_THUMBBE = "thumb_be"
AP_BITS_DEF = "def"
AP_BITS_FOR_ARM = [AP_BITS_ARM, AP_BITS_ARMBE, AP_BITS_THUMB, AP_BITS_THUMBBE, AP_BITS_DEF]
AP_BITS_FOR_X86 = [AP_BITS_16, AP_BITS_32, AP_BITS_64, AP_BITS_DEF]
AP_BITS = AP_BITS_FOR_X86 + AP_BITS_FOR_ARM
AP_SYNT_ATT = "att"
AP_SYNT_GAS = "gas"
AP_SYNT_INTEL = "intel"
AP_SYNT_MASM = "masm"
AP_SYNT_NASM = "nasm"
AP_SYNT_DEF = "def"
AP_SYNTS = [AP_SYNT_ATT, AP_SYNT_GAS, AP_SYNT_INTEL, AP_SYNT_MASM, AP_SYNT_NASM, AP_SYNT_DEF]
arch2cs = {
AP_ARCH_X86 : capstone.CS_ARCH_X86,
AP_ARCH_ARM : capstone.CS_ARCH_ARM,
AP_ARCH_ARM64 : capstone.CS_ARCH_ARM64,
}
bits2cs = {
AP_BITS_16 : capstone.CS_MODE_16,
AP_BITS_32 : capstone.CS_MODE_32,
AP_BITS_64 : capstone.CS_MODE_64,
AP_BITS_ARM : capstone.CS_MODE_ARM,
AP_BITS_ARMBE : capstone.CS_MODE_ARM | capstone.CS_MODE_BIG_ENDIAN,
AP_BITS_THUMB : capstone.CS_MODE_THUMB,
AP_BITS_THUMBBE : capstone.CS_MODE_THUMB | capstone.CS_MODE_BIG_ENDIAN,
AP_BITS_DEF : 0,
}
synt2cs = {
AP_SYNT_ATT : capstone.CS_OPT_SYNTAX_ATT,
AP_SYNT_GAS : capstone.CS_OPT_SYNTAX_ATT,
AP_SYNT_INTEL : capstone.CS_OPT_SYNTAX_INTEL,
AP_SYNT_MASM : capstone.CS_OPT_SYNTAX_MASM,
AP_SYNT_NASM : capstone.CS_OPT_SYNTAX_INTEL,
AP_SYNT_DEF : 0,
}
arch2uc = None
bits2uc = None
if haveunicorn:
arch2uc = {
AP_ARCH_X86 : unicorn.UC_ARCH_X86,
AP_ARCH_ARM : unicorn.UC_ARCH_ARM,
AP_ARCH_ARM64 : unicorn.UC_ARCH_ARM64,
}
bits2uc = {
AP_BITS_16 : unicorn.UC_MODE_16,
AP_BITS_32 : unicorn.UC_MODE_32,
AP_BITS_64 : unicorn.UC_MODE_64,
AP_BITS_ARM : unicorn.UC_MODE_ARM,
AP_BITS_ARMBE : unicorn.UC_MODE_ARM | unicorn.UC_MODE_BIG_ENDIAN,
AP_BITS_THUMB : unicorn.UC_MODE_THUMB,
AP_BITS_THUMBBE : unicorn.UC_MODE_THUMB | unicorn.UC_MODE_BIG_ENDIAN,
AP_BITS_DEF : 0,
}
arch2ks = {
AP_ARCH_X86 : keystone.KS_ARCH_X86,
AP_ARCH_ARM : keystone.KS_ARCH_ARM,
AP_ARCH_ARM64 : keystone.KS_ARCH_ARM64,
}
bits2ks = {
AP_BITS_16 : keystone.KS_MODE_16,
AP_BITS_32 : keystone.KS_MODE_32,
AP_BITS_64 : keystone.KS_MODE_64,
AP_BITS_ARM : keystone.KS_MODE_ARM,
AP_BITS_ARMBE : keystone.KS_MODE_ARM | keystone.KS_MODE_BIG_ENDIAN,
AP_BITS_THUMB : keystone.KS_MODE_THUMB,
AP_BITS_THUMBBE : keystone.KS_MODE_THUMB | keystone.KS_MODE_BIG_ENDIAN,
AP_BITS_DEF : 0,
}
synt2ks = {
AP_SYNT_ATT : keystone.KS_OPT_SYNTAX_ATT,
AP_SYNT_GAS : keystone.KS_OPT_SYNTAX_GAS,
AP_SYNT_INTEL : keystone.KS_OPT_SYNTAX_INTEL,
AP_SYNT_MASM : keystone.KS_OPT_SYNTAX_MASM,
AP_SYNT_NASM : keystone.KS_OPT_SYNTAX_NASM,
AP_SYNT_DEF : 0,
}
CMDS = \
"""
In ASM or EMU mode:
<assembly ending in empty line>
In DIS mode:
<hex encoded bytes to be disassembled ending in empty line>
Mode Commands:
MODE (ASM|DIS|EMU)
ARCH (X86|ARM|ARM64)
BITS (16|32|64|ARM|THUMB|ARM_BE|THUMB_BE) # depends on the current arch
SYNT (NASM|ATT) # applies to x86 arch
INFO # get current mode info
HELP # print this text
QUIT
"""
WELCOME = \
"""
/\ _ _ | . _ _
/--\_)||||__|| )(-
""" + CMDS
PROMPTE = " > "
def hex2b(ins):
ins = ins.translate({ord(x): None for x in " \t\n-:"})
return bytes.fromhex(ins)
def disassemble_bytes(inb, arch, bits, synt=AP_SYNT_DEF, withbytes=False):
cs = capstone.Cs(arch2cs[arch], bits2cs[bits])
if synt != AP_SYNT_DEF:
cs.syntax = synt2cs[synt]
out = ""
for i in cs.disasm(inb, 0):
if withbytes:
out += i.bytes.hex() + ' '
out += i.mnemonic + ' ' + i.op_str + '\n'
return out
def disassemble_hex(ins, arch, bits, synt=AP_SYNT_DEF):
inb = hex2b(ins)
return disassemble_bytes(inb, arch, bits, synt)
def emulate(ins, arch, bits, synt=AP_SYNT_DEF, allocstack=True):
if not haveunicorn:
raise Exception("Must have unicorn engine installed to use emulate feature")
code = assemble(ins, arch, bits, synt)
addr = 0x1000
uc = unicorn.Uc(arch2uc[arch], bits2uc[bits])
PGSZ = 0x1000
roundup = (len(code) + (PGSZ-1)) & (~(PGSZ-1))
uc.mem_map(addr, roundup)
uc.mem_write(addr, code)
if allocstack:
stkaddr = 0xe000
spaddr = 0xf000
stksz = 0x2000
uc.mem_map(stkaddr, stksz)
sp = None
if arch == AP_ARCH_ARM:
sp = unicorn.arm_const.UC_ARM_REG_SP
elif arch == AP_ARCH_ARM64:
sp = unicorn.arm64_const.UC_ARM64_REG_SP
elif arch == AP_ARCH_X86 and bits == AP_BITS_64:
sp = unicorn.x86_const.UC_X86_REG_RSP
elif arch == AP_ARCH_X86 and bits == AP_BITS_32:
sp = unicorn.x86_const.UC_X86_REG_ESP
elif arch == AP_ARCH_X86 and bits == AP_BITS_16:
sp = unicorn.x86_const.UC_X86_REG_SP
else:
raise Exception("Stack allocation not supported for this arch")
uc.reg_write(sp, spaddr)
try:
uc.emu_start(addr, addr+len(code))
except unicorn.UcError as e:
print("Got Emulation error:", e)
# dump state
# more / fewer registers?
rgmod = None
prefix = ""
namelist = []
if arch == AP_ARCH_ARM:
prefix = "UC_ARM_REG_"
rgmod = unicorn.arm_const
namelist = "r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 lr sp cpsr pc Q0 Q1".split()
elif arch == AP_ARCH_ARM64:
prefix = "UC_ARM64_REG_"
rgmod = unicorn.arm64_const
namelist = "x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 lr sp cpsr pc Q0 Q1".split()
elif arch == AP_ARCH_X86 and bits == AP_BITS_64:
prefix = "UC_X86_REG_"
rgmod = unicorn.x86_const
namelist = "rax rbx rcx rdx rsi rdi r8 r9 rsp rbp rip xmm0 xmm1".split()
elif arch == AP_ARCH_X86 and bits == AP_BITS_32:
prefix = "UC_X86_REG_"
rgmod = unicorn.x86_const
namelist = "eax ebx ecx edx esi edi esp ebp eip xmm0 xmm1".split()
elif arch == AP_ARCH_X86 and bits == AP_BITS_16:
prefix = "UC_X86_REG_"
rgmod = unicorn.x86_const
namelist = "ax bx cx dx si di sp bp ip".split()
else:
raise Exception("Unexpected arch")
for r in namelist:
rn = prefix + r.upper()
rg = getattr(rgmod, rn)
try:
rval = uc.reg_read(rg)
except unicorn.UcError:
continue
if isinstance(rval, int):
print(r, '=', hex(rval))
def assemble(ins, arch, bits, synt=AP_SYNT_DEF):
ks = keystone.Ks(arch2ks[arch], bits2ks[bits])
if synt != AP_SYNT_DEF:
ks.syntax = synt2ks[synt]
b, _ = ks.asm(ins)
return bytes(b)
def get_prompt(mode, arch, bits, synt=AP_SYNT_DEF):
archbits = arch
if arch == AP_ARCH_X86:
archbits += '_'+ bits
elif bits in AP_BITS_FOR_ARM:
if bits in [AP_BITS_THUMB, AP_BITS_THUMBBE]:
archbits += "(thumb)"
if bits in [AP_BITS_ARMBE, AP_BITS_THUMBBE]:
archbits += "(BE)"
return mode.upper() +' '+ archbits + PROMPTE
def get_info(mode, arch, bits, synt=AP_SYNT_DEF):
out = ""
out += "MODE: " + mode.upper() + "\n"
out += "ARCH: " + arch.upper() + "\n"
if bits != AP_BITS_DEF:
out += "BITS: " + bits.upper() + "\n"
if arch == "x86" and synt != AP_SYNT_DEF:
out += "SYNT: " + synt.upper() + "\n"
return out
def main():
mode = AP_MODE_ASM
arch = AP_ARCH_X86
bits = AP_BITS_64
synt = AP_SYNT_DEF
#TODO parse cmd args
print(WELCOME)
while True:
prompt = get_prompt(mode, arch, bits, synt)
try:
cmd = input(prompt).strip()
except EOFError:
break
if len(cmd) == 0:
continue
# handle special commands
scmd = cmd.lower().split()
if scmd[0] == 'mode':
if len(scmd) != 2:
print("MODE Expected 1 argument")
continue
if scmd[1] not in AP_MODES:
print("Unsupported mode")
continue
mode = scmd[1]
continue
elif scmd[0] == 'arch':
if len(scmd) != 2:
print("ARCH Expected 1 argument")
continue
if scmd[1] not in AP_ARCHS:
print("Unsupported mode")
continue
arch = scmd[1]
# set bits and synt to default
if arch == AP_ARCH_ARM64:
bits = AP_BITS_DEF
synt = AP_SYNT_DEF
if arch == AP_ARCH_ARM:
bits = AP_BITS_ARM
synt = AP_SYNT_DEF
elif arch == AP_ARCH_X86:
bits = AP_BITS_64
synt = AP_SYNT_NASM
continue
elif scmd[0] == 'bits':
if len(scmd) != 2:
print("BITS Expected 1 argument")
continue
if scmd[1] not in AP_BITS:
print("Unsupported mode")
continue
if arch in [AP_ARCH_ARM, AP_ARCH_ARM64] and scmd[1] not in AP_BITS_FOR_ARM:
print("Unsupported bits for current arch")
continue
elif arch == AP_ARCH_X86 and scmd[1] not in AP_BITS_FOR_X86:
print("Unsupported bits for current arch")
continue
bits = scmd[1]
continue
elif scmd[0] == 'synt':
if len(scmd) != 2:
print("SYNT Expected 1 argument")
continue
if scmd[1] not in AP_SYNTS:
print("Unsupported mode")
continue
if arch in [AP_ARCH_ARM, AP_ARCH_ARM64]:
print("SYNT unsupported for current arch")
continue
synt = scmd[1]
continue
elif scmd[0] == 'info':
print(get_info(mode, arch, bits, synt))
continue
elif scmd[0] == 'help':
print(CMDS)
continue
elif scmd[0] == 'quit':
break
plen = len(prompt) - len(PROMPTE)
prompt = (' ' * plen) + PROMPTE
# get rest of input
while True:
n = input(prompt).strip()
if n == "":
break
cmd += '\n' + n
# hand off
try:
if mode == AP_MODE_ASM:
asm = assemble(cmd, arch, bits, synt)
sp = 0x8
nl = 0x10
for i in range(0, len(asm), sp):
print(asm[i:i+sp].hex(), end=' ')
if ((i+sp) % nl) == 0 and (i+sp) < len(asm):
print('')
print('')
elif mode == AP_MODE_DIS:
dis = disassemble_hex(cmd, arch, bits, synt)
print(dis)
elif mode == AP_MODE_EMU:
emulate(cmd, arch, bits, synt)
print('')
except Exception as e:
print(e)
print("")
return
if __name__ == '__main__':
main()
``` |
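`assemble`/`disassemble_hex` above are thin wrappers over Keystone and Capstone. A small round-trip using those libraries directly, for one x86-64 instruction:
```python
import capstone
import keystone

# assemble with Keystone...
ks = keystone.Ks(keystone.KS_ARCH_X86, keystone.KS_MODE_64)
encoding, count = ks.asm("xor rax, rax")
code = bytes(encoding)
print(code.hex())                    # 4831c0

# ...and disassemble back with Capstone
cs = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
for ins in cs.disasm(code, 0):
    print(ins.mnemonic, ins.op_str)  # xor rax, rax
```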
{
"source": "jordan9001/dobby2",
"score": 2
} |
#### File: dobby2/dobby/dobby_remote.py
```python
from triton import *
from .interface import *
from .dobby import *
#TODO this will be a interface for any remote providers
# I plan to use this with my hypervisor as a really fast provider
class DobbyRemote(DobbyProvider, DobbyEmu, DobbySym, DobbyRegContext, DobbyMem, DobbySnapshot, DobbyFuzzer):
"""
Dobby provider using Triton DSE
"""
def __init__(self, ctx, remotename):
super().__init__(ctx, remotename)
#TODO
raise NotImplementedError(f"TODO")
```
#### File: dobby2/dobby/winsys.py
```python
if __name__ == '__main__':
print("Please import this file from a dobby script")
exit(-1)
import struct
from .dobby import *
from .dobby_const import *
# windows kernel helper functions
def createIrq(ctx, irqtype, inbuf):
raise NotImplementedError("TODO")
def createDrvObj(ctx, start, size, entry, path, name="DriverObj"):
dobjsz = 0x150
d = ctx.alloc(dobjsz)
dex = ctx.alloc(0x50)
dte = ctx.alloc(0x120)
# initialize driver object
# type = 0x4
ctx.setu16(d + 0x00, 0x4)
# size = 0x150
ctx.setu16(d + 0x02, dobjsz)
# DeviceObject = 0
ctx.setu64(d + 0x08, 0x0)
# flags = ??
#TODO
ctx.trySymbolizeMemory(d+0x10, 8, name+".Flags")
# DriverStart = start
ctx.setu64(d + 0x18, start)
# DriverSize = size
ctx.setu32(d + 0x20, size)
# DriverSection = LDR_DATA_TABLE_ENTRY
# not sure what most of these fields are, so we will see what is used
# set up DriverSection
ctx.trySymbolizeMemory(dte+0x0, 0x10, name + ".DriverSection.InLoadOrderLinks")
ctx.trySymbolizeMemory(dte+0x10, 0x10, name + ".DriverSection.InMemoryOrderLinks")
ctx.trySymbolizeMemory(dte+0x20, 0x10, name + ".DriverSection.InInitializationOrderLinks")
ctx.setu64(dte+0x30, start)
ctx.setu64(dte+0x38, entry)
ctx.setu64(dte+0x40, size)
initUnicodeStr(ctx, dte+0x48, path)
initUnicodeStr(ctx, dte+0x58, path.split('\\')[-1])
ctx.trySymbolizeMemory(dte+0x68, 0x8, name + ".DriverSection.Flags")
ctx.trySymbolizeMemory(dte+0x70, 0x10, name + ".DriverSection.HashLinks")
ctx.setu64(dte+0x80, 0) # TimeDateStamp
ctx.trySymbolizeMemory(dte+0x88, 0x8, name + ".DriverSection.EntryPointActivationContext")
ctx.setu64(dte+0x90, 0) # Lock
ctx.trySymbolizeMemory(dte+0x98, 0x8, name + ".DriverSection.DdagNode")
ctx.trySymbolizeMemory(dte+0xa0, 0x10, name + ".DriverSection.NodeModuleLink")
ctx.trySymbolizeMemory(dte+0xb0, 0x8, name + ".DriverSection.LoadContext")
ctx.trySymbolizeMemory(dte+0xb8, 0x8, name + ".DriverSection.ParentDllBase")
ctx.trySymbolizeMemory(dte+0xc0, 0x8, name + ".DriverSection.SwitchBackContext")
ctx.trySymbolizeMemory(dte+0xc8, 0x20, name + ".DriverSection.IndexNodeStuff")
ctx.trySymbolizeMemory(dte+0xf8, 0x8, name + ".DriverSection.OriginalBase")
ctx.trySymbolizeMemory(dte+0x100, 0x8, name + ".DriverSection.LoadTime")
ctx.setu32(dte+0x108, 0) # BaseNameHashValue
ctx.setu32(dte+0x10c, 0) # LoadReasonStaticDependency
ctx.trySymbolizeMemory(dte+0x110, 4, name + ".DriverSection.ImplicitPathOptions")
ctx.setu32(dte+0x118, 0) # DependentLoadFlags
ctx.setu32(dte+0x11c, 0) # SigningLevel
#ctx.trySymbolizeMemory(d+0x28, 8, name+".DriverSection")
ctx.setu64(d+0x28, dte)
# DriverExtension = dex
ctx.setu64(d + 0x30, dex)
# DriverName
initUnicodeStr(ctx, d+0x38, "\\Driver\\" + name)
# HardwareDatabase = ptr str
hd = createUnicodeStr(ctx, "\\REGISTRY\\MACHINE\\HARDWARE\\DESCRIPTION\\SYSTEM")
ctx.setu64(d + 0x48, hd)
# FastIoDispatch = 0
ctx.setu64(d + 0x50, 0x0)
# DriverInit = DriverEntry
ctx.setu64(d + 0x58, entry)
# DriverStartIO = 0
ctx.setu64(d + 0x60, 0x0)
# DriverUnload = 0
ctx.setu64(d + 0x68, 0x0)
# MajorFunctions = 0
ctx.setMemVal(d + 0x70, b"\x00" * 8 * 28)
# initialize driver extension
# ext.DriverObject = d
ctx.setu64(dex + 0x00, d)
# ext.AddDevice = 0
ctx.setu64(dex + 0x08, 0)
# ext.Count = 0
ctx.setu64(dex + 0x10, 0)
# ext.ServiceKeyName
initUnicodeStr(ctx, dex+0x18, name)
# ext.ClientDriverExtension = 0
ctx.setu64(dex + 0x28, 0)
# ext.FsFilterCallbacks = 0
ctx.setu64(dex + 0x30, 0)
# ext.KseCallbacks = 0
ctx.setu64(dex + 0x38, 0)
# ext.DvCallbacks = 0
ctx.setu64(dex + 0x40, 0)
# ext.VerifierContext = 0
ctx.setu64(dex + 0x48, 0)
return d
def createUnicodeStr(ctx, s):
ustr = ctx.alloc(0x10)
initUnicodeStr(ctx, ustr, s)
return ustr
def initUnicodeStr(ctx, addr, s):
us = s.encode("UTF-16-LE")
buf = ctx.alloc(len(us))
ctx.setMemVal(buf, us)
ctx.setu16(addr + 0, len(us))
ctx.setu16(addr + 2, len(us))
ctx.setu64(addr + 0x8, buf)
def readUnicodeStr(ctx, addr):
l = ctx.getu16(addr)
ptr = ctx.getu64(addr+0x8)
if ctx.issym and ctx.isSymbolizedMemory(addr+8, 8):
print("Tried to read from a symbolized buffer in a unicode string")
return ""
b = ctx.getMemVal(ptr, l)
return str(b, "UTF_16_LE")
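# Layout assumed by initUnicodeStr/readUnicodeStr above (64-bit UNICODE_STRING):
#   +0x00  Length         (u16, in bytes)
#   +0x02  MaximumLength  (u16, set equal to Length here)
#   +0x08  Buffer         (pointer to the UTF-16LE data)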
def setIRQL(ctx, newlevel):
oldirql = ctx.getRegVal(DB_X86_R_CR8)
#TODO save old one at offset from gs see KeRaiseIrqlToDpcLevel
ctx.setRegVal(DB_X86_R_CR8, newlevel)
return oldirql
def KeBugCheckEx_hook(hook, ctx, addr, sz, op, provider):
code = ctx.getRegVal(DB_X86_R_RCX)
print(f"Bug Check! Code: {code:x}. See other 4 params for more info")
return HookRet.FORCE_STOP_INS
def ExAllocatePoolWithTag_hook(hook, ctx, addr, sz, op, provider):
#TODO actually have an allocator? Hope they don't do this a lot
#TODO memory permissions based on pool
pool = ctx.getRegVal(DB_X86_R_RCX)
amt = ctx.getRegVal(DB_X86_R_RDX)
tag = struct.pack("<I", ctx.getRegVal(DB_X86_R_R8))
area = ctx.alloc(amt)
ctx.active.globstate["poolAllocations"].append((pool, amt, tag, area))
print("ExAllocatePoolWithTag", hex(amt), tag, '=', hex(area))
ctx.doRet(area)
return HookRet.OP_DONE_INS
def ExFreePoolWithTag_hook(hook, ctx, addr, sz, op, provider):
#TODO actually do this?
area = ctx.getRegVal(DB_X86_R_RCX)
print("ExFreePoolWithTag", hex(area))
ctx.doRet(area)
return HookRet.OP_DONE_INS
def RtlDuplicateUnicodeString_hook(hook, ctx, addr, sz, op, provider):
add_nul = ctx.getRegVal(DB_X86_R_RCX)
src = ctx.getRegVal(DB_X86_R_RDX)
dst = ctx.getRegVal(DB_X86_R_R8)
# check bounds
if not ctx.inBounds(src, 0x10, MEM_READ):
print("RtlDuplicateUnicodeString: src oob")
return HookRet.STOP_INS
if not ctx.inBounds(dst, 0x10, MEM_WRITE):
print("RtlDuplicateUnicodeString: dst oob")
return HookRet.STOP_INS
numbytes = ctx.getu16(src)
srcbuf = ctx.getu64(src+8)
srcval = b""
if numbytes != 0:
# check buffers
if not ctx.inBounds(srcbuf, numbytes, MEM_READ):
print("RtlDuplicateUnicodeString: src.buf oob")
return HookRet.STOP_INS
for i in range(numbytes):
if ctx.issym and ctx.isSymbolizedMemory(srcbuf+i, 1):
print("RtlDuplicateUnicodeString: symbolized in src.buf")
return HookRet.STOP_INS
srcval = ctx.getMemVal(srcbuf, numbytes)
if add_nul > 1 or (add_nul == 1 and numbytes != 0):
srcval += b"\x00\x00"
if len(srcval) == 0:
# null buffer, 0 len
ctx.setu16(dst + 0x0, 0)
ctx.setu16(dst + 0x2, 0)
ctx.setu64(dst + 0x8, 0)
else:
dstbuf = ctx.alloc(len(srcval))
ctx.setMemVal(dstbuf, srcval)
ctx.setu16(dst + 0x0, numbytes)
ctx.setu16(dst + 0x2, numbytes)
ctx.setu64(dst + 0x8, dstbuf)
s = str(srcval, "UTF_16_LE")
ctx.doRet(0)
print(f"RtlDuplicateUnicodeString : \"{s}\"")
return HookRet.OP_DONE_INS
def IoCreateFileEx_hook(hook, ctx, addr, sz, op, provider):
h = ctx.active.globstate["nexthandle"]
ctx.active.globstate["nexthandle"] += 1
phandle = ctx.getRegVal(DB_X86_R_RCX)
oa = ctx.getRegVal(DB_X86_R_R8)
iosb = ctx.getRegVal(DB_X86_R_R9)
sp = ctx.getRegVal(DB_X86_R_RSP)
disp = ctx.getu32(sp + 0x28 + (3 * 8))
driverctx = ctx.getu64(sp + 0x28 + (10 * 8))
if ctx.issym and ctx.isSymbolizedMemory(oa+0x10, 8):
print("Unicode string in object attributes is symbolized")
return HookRet.FORCE_STOP_INS
namep = ctx.getu64(oa+0x10)
name = readUnicodeStr(ctx, namep)
ctx.setu64(phandle, h)
# set up iosb
info = 0
disp_str = ""
if disp == 0:
disp_str = "FILE_SUPERSEDE"
info = 0 # FILE_SUPERSEDED
elif disp == 1:
disp_str = "FILE_OPEN"
info = 1 # FILE_OPENED
elif disp == 2:
disp_str = "FILE_CREATE"
info = 2 # FILE_CREATED
elif disp == 3:
disp_str = "FILE_OPEN_IF"
info = 2 # FILE_CREATED
elif disp == 4:
disp_str = "FILE_OVERWRITE_IF"
info = 3 # FILE_OVERWRITTEN
elif disp == 5:
disp_str = "FILE_OVERWRITE_IF"
info = 2 # FILE_CREATED
ctx.setu64(iosb, 0)
ctx.setu64(iosb+8, info)
objinfo = (h, name, disp, driverctx, provider)
ctx.active.globstate["handles"][h] = objinfo
ctx.doRet(0)
print(f"IoCreateFileEx: \"{name}\" {disp_str} = {h}")
return HookRet.STOP_INS
def IoCreateDevice_hook(hook, ctx, addr, sz, op, provider):
drvobj = ctx.getRegVal(DB_X86_R_RCX)
easz = ctx.getRegVal(DB_X86_R_RDX)
dname = ctx.getRegVal(DB_X86_R_R8)
dtype = ctx.getRegVal(DB_X86_R_R9)
sp = ctx.getRegVal(DB_X86_R_RSP)
char = ctx.getu32(sp + 0x28 + (0 * 8))
exclusive = ctx.getu64(sp + 0x28 + (1 * 8))
outdev = ctx.getu64(sp + 0x28 + (2 * 8))
name = readUnicodeStr(ctx, dname)
print(f"Driver is trying to create device {name}")
return HookRet.FORCE_STOP_INS
def ZwClose_hook(hook, ctx, addr, sz, op, provider):
h = ctx.getRegVal(DB_X86_R_RCX)
name = ctx.active.globstate["handles"][h][1]
del ctx.active.globstate["handles"][h]
print(f"Closed File {h} ({name})")
ctx.doRet(0)
return HookRet.OP_DONE_INS
def ZwWriteFile_hook(hook, ctx, addr, sz, op, provider):
h = ctx.getRegVal(DB_X86_R_RCX)
evt = ctx.getRegVal(DB_X86_R_RDX)
apcrou = ctx.getRegVal(DB_X86_R_R8)
apcctx = ctx.getRegVal(DB_X86_R_R9)
sp = ctx.getRegVal(DB_X86_R_RSP)
iosb = ctx.getu64(sp + 0x28 + (0 * 8))
buf = ctx.getu64(sp + 0x28 + (1 * 8))
blen = ctx.getu32(sp + 0x28 + (2 * 8))
poff = ctx.getu64(sp + 0x28 + (3 * 8))
if apcrou != 0:
print("ZwWriteFile with apcroutine!")
return HookRet.FORCE_STOP_INS
name = ctx.active.globstate["handles"][h][1]
off = 0
if poff != 0:
off = ctx.getu64(poff)
ctx.setu64(iosb, 0)
ctx.setu64(iosb+8, blen)
ctx.doRet(0)
print(f"ZwWriteFile: {h}({name})) {hex(blen)} bytes{(' at offset ' + hex(off)) if poff != 0 else ''}")
ctx.printMem(buf, blen)
return HookRet.OP_DONE_INS
def ZwReadFile_hook(hook, ctx, addr, sz, op, provider):
h = ctx.getRegVal(DB_X86_R_RCX)
sp = ctx.getRegVal(DB_X86_R_RSP)
iosb = ctx.getu64(sp + 0x28 + (0 * 8))
buf = ctx.getu64(sp + 0x28 + (1 * 8))
blen = ctx.getu32(sp + 0x28 + (2 * 8))
poff = ctx.getu64(sp + 0x28 + (3 * 8))
print(f"ZwReadFile: {h}({name}) {hex(blen)} into {hex(buf)}")
if poff:
offval = ctx.getu64(poff)
print(f"Read is at offset {hex(offval)}")
ctx.doRet(0)
return HookRet.FORCE_STOP_INS
def ZwFlushBuffersFile_hook(hook, ctx, addr, sz, op, provider):
h = ctx.getRegVal(DB_X86_R_RCX)
iosb = ctx.getRegVal(DB_X86_R_RDX)
ctx.setu64(iosb, 0)
ctx.setu64(iosb+8, 0)
print(f"ZwFlushBuffersFile {h}")
ctx.doRet(0)
return HookRet.DONE_INS
def KeAreAllApcsDisabled_hook(hook, ctx, addr, sz, op, provider):
# checks:
# currentthread.SpecialAcpDisable
# KeAreInterruptsEnabled (IF in rflags)
# cr8 == 0
#TODO do all the above checks
cr8val = ctx.getRegVal(DB_X86_R_CR8)
ie = ((ctx.getRegVal(DB_X86_R_EFLAGS) >> 9) & 1)
ret = 0 if cr8val == 0 and ie == 1 else 1
print(f"KeAreAllApcsDisabled : {ret}")
ctx.doRet(ret)
return HookRet.DONE_INS
def KeIpiGenericCall_hook(hook, ctx, addr, sz, op, provider):
fcn = ctx.getRegVal(DB_X86_R_RCX)
arg = ctx.getRegVal(DB_X86_R_RDX)
# set IRQL to IPI_LEVEL
old_level = setIRQL(ctx, 0xe)
# do IpiGeneric Call
ctx.setRegVal(DB_X86_R_RCX, arg)
ctx.setRegVal(DB_X86_R_RIP, fcn)
# set hook for when we finish
def finish_KeIpiGenericCall_hook(hook, ctx, addr, sz, op, provider):
# remove self
ctx.delHook(hook)
setIRQL(ctx, old_level)
rval = ctx.getRegVal(DB_X86_R_RAX)
print(f"KeIpiGenericCall returned {hex(rval)}")
return HookRet.OP_CONT_INS
curstack = ctx.getRegVal(DB_X86_R_RSP)
retaddr = ctx.getu64(curstack)
ctx.addHook(retaddr, retaddr+1, MEM_EXECUTE, handler=finish_KeIpiGenericCall_hook, label="")
print(f"KeIpiGenericCall {hex(fcn)} ({hex(arg)})")
return HookRet.OP_DONE_INS
def ZwQuerySystemInformation_hook(hook, ctx, addr, sz, op, provider):
infoclass = ctx.getRegVal(DB_X86_R_RCX)
buf = ctx.getRegVal(DB_X86_R_RDX)
buflen = ctx.getRegVal(DB_X86_R_R8)
retlenptr = ctx.getRegVal(DB_X86_R_R9)
if infoclass == 0x0b: #SystemModuleInformation
# buffer should contain RTL_PROCESS_MODULES structure
raise NotImplementedError(f"Unimplemented infoclass SystemModuleInformation in ZwQuerySystemInformation")
elif infoclass == 0x4d: #SystemModuleInformationEx
# buffer should contain RTL_PROCESS_MODULE_INFORMATION_EX
# has to include the module we are emulating
# just copy over a good buffer from the computer?
# if they actually use the info we are in trouble
# actually load in a bunch of modules? :(
# might have to support paging in/out if that needs to happen
# for now just try a good value
# see side_utils for doing this from python to get example output
# TODO provide a good output, but symbolize any real addresses
raise NotImplementedError(f"Unimplemented infoclass SystemModuleInformationEx in ZwQuerySystemInformation")
else:
raise NotImplementedError(f"Unimplemented infoclass in ZwQuerySystemInformation : {hex(infoclass)}")
def ExSystemTimeToLocalTime_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr0"])
print("ExSystemTimeToLocalTime")
return HookRet.DONE_INS
def RtlTimeToTimeFields_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr1"])
print("RtlTimeToTimeFields")
return HookRet.DONE_INS
def _stricmp_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr2"])
s1addr = ctx.getRegVal(DB_X86_R_RCX)
s2addr = ctx.getRegVal(DB_X86_R_RDX)
s1 = ctx.getCStr(s1addr)
s2 = ctx.getCStr(s2addr)
print(f"_stricmp \"{s1}\" vs \"{s2}\"")
return HookRet.OP_DONE_INS
def wcscat_s_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr3"])
s1addr = ctx.getRegVal(DB_X86_R_RCX)
s2addr = ctx.getRegVal(DB_X86_R_R8)
num = ctx.getRegVal(DB_X86_R_RDX)
s1 = ctx.getCWStr(s1addr)
s2 = ctx.getCWStr(s2addr)
print(f"wcscat_s ({num}) \"{s1}\" += \"{s2}\"")
return HookRet.OP_DONE_INS
def wcscpy_s_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr4"])
dst = ctx.getRegVal(DB_X86_R_RCX)
src = ctx.getRegVal(DB_X86_R_R8)
num = ctx.getRegVal(DB_X86_R_RDX)
s = ctx.getCWStr(src)
print(f"wcscpy_s {hex(dst)[2:]}({num}) <= \"{s}\"")
return HookRet.OP_DONE_INS
def RtlInitUnicodeString_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr5"])
src = ctx.getRegVal(DB_X86_R_RDX)
s = ctx.getCWStr(src)
print(f"RtlInitUnicodeString \"{s}\"")
return HookRet.OP_DONE_INS
def swprintf_s_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr6"])
buf = ctx.getRegVal(DB_X86_R_RCX)
fmt = ctx.getRegVal(DB_X86_R_R8)
fmts = ctx.getCWStr(fmt)
# set hook for after return
sp = ctx.getRegVal(DB_X86_R_RSP)
retaddr = ctx.getu64(sp)
def finish_swprintf_s_hook(hook, ctx, addr, sz, op, provider):
# remove self
ctx.delHook(hook)
s = ctx.getCWStr(buf)
print(f"Finished swprintf_s: \"{s}\" from \"{fmts}\"")
return HookRet.OP_CONT_INS
ctx.addHook(retaddr, retaddr+1, MEM_EXECUTE, handler=finish_swprintf_s_hook, label="")
return HookRet.OP_DONE_INS
def vswprintf_s_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr7"])
buf = ctx.getRegVal(DB_X86_R_RCX)
fmt = ctx.getRegVal(DB_X86_R_R8)
fmts = ctx.getCWStr(fmt)
# set hook for after return
sp = ctx.getRegVal(DB_X86_R_RSP)
retaddr = ctx.getu64(sp)
def finish_vswprintf_s_hook(hook, ctx, addr, sz, op, provider):
# remove self
ctx.delHook(hook)
s = ctx.getCWStr(buf)
print(f"Finished vswprintf_s: \"{s}\" from \"{fmts}\"")
return HookRet.OP_CONT_INS
ctx.addHook(retaddr, retaddr+1, MEM_EXECUTE, handler=finish_vswprintf_s_hook, label="")
return HookRet.OP_DONE_INS
def _vsnwprintf_hook(hook, ctx, addr, sz, op, provider):
ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr8"])
buf = ctx.getRegVal(DB_X86_R_RCX)
fmt = ctx.getRegVal(DB_X86_R_R8)
fmts = ctx.getCWStr(fmt)
# set hook for after return
sp = ctx.getRegVal(DB_X86_R_RSP)
retaddr = ctx.getu64(sp)
def finish__vsnwprintf_s_hook(hook, ctx, addr, sz, op, provider):
# remove self
ctx.delHook(hook)
s = ctx.getCWStr(buf)
print(f"Finished _vsnwprintf_s: \"{s}\" from \"{fmts}\"")
return HookRet.OP_CONT_INS
ctx.addHook(retaddr, retaddr+1, MEM_EXECUTE, handler=finish__vsnwprintf_s_hook, label="")
return HookRet.OP_DONE_INS
def createThunkHooks(ctx):
# have to be in higher scope for pickling the hooks
name = "ExSystemTimeToLocalTime"
ctx.active.globstate["_thunk_symaddr0"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, ExSystemTimeToLocalTime_hook, "ignore")
name = "RtlTimeToTimeFields"
ctx.active.globstate["_thunk_symaddr1"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, RtlTimeToTimeFields_hook, "ignore")
name = "_stricmp"
ctx.active.globstate["_thunk_symaddr2"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, _stricmp_hook, "ignore")
name = "wcscat_s"
ctx.active.globstate["_thunk_symaddr3"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, wcscat_s_hook, "ignore")
name = "wcscpy_s"
ctx.active.globstate["_thunk_symaddr4"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, wcscpy_s_hook, "ignore")
name = "RtlInitUnicodeString"
ctx.active.globstate["_thunk_symaddr5"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, RtlInitUnicodeString_hook, "ignore")
name = "swprintf_s"
ctx.active.globstate["_thunk_symaddr6"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, swprintf_s_hook, "ignore")
name = "vswprintf_s"
ctx.active.globstate["_thunk_symaddr7"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, vswprintf_s_hook, "ignore")
name = "_vsnwprintf"
ctx.active.globstate["_thunk_symaddr8"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
ctx.setApiHandler(name, _vsnwprintf_hook, "ignore")
def setNtosThunkHook(ctx, name, dostop):
ctx.setApiHandler(name, ctx.createThunkHook(name, "ntoskrnl.exe", dostop), "ignore")
def registerWinHooks(ctx):
ctx.setApiHandler("RtlDuplicateUnicodeString", RtlDuplicateUnicodeString_hook, "ignore")
ctx.setApiHandler("KeBugCheckEx", KeBugCheckEx_hook, "ignore")
ctx.setApiHandler("ExAllocatePoolWithTag", ExAllocatePoolWithTag_hook, "ignore")
ctx.setApiHandler("ExFreePoolWithTag", ExFreePoolWithTag_hook, "ignore")
ctx.setApiHandler("IoCreateFileEx", IoCreateFileEx_hook, "ignore")
ctx.setApiHandler("ZwClose", ZwClose_hook, "ignore")
ctx.setApiHandler("ZwWriteFile", ZwWriteFile_hook, "ignore")
ctx.setApiHandler("ZwReadFile", ZwReadFile_hook, "ignore")
ctx.setApiHandler("ZwFlushBuffersFile", ZwFlushBuffersFile_hook, "ignore")
ctx.setApiHandler("KeAreAllApcsDisabled", KeAreAllApcsDisabled_hook, "ignore")
ctx.setApiHandler("KeIpiGenericCall", KeIpiGenericCall_hook, "ignore")
ctx.setApiHandler("IoCreateDevice", IoCreateDevice_hook, "ignore")
createThunkHooks(ctx)
def loadNtos(ctx, base=0xfffff8026be00000):
# NOTE just because we load ntos doesn't mean it is initialized at all
# Make sure you initialize the components you intend to use
print("Loading nt...")
ctx.loadPE("ntoskrnl.exe", base)
print("Loaded!")
def kuser_time_hook(hk, ctx, addr, sz, op, provider):
# InterruptTime is 100ns scale time since start
it = ctx.getTicks()
# SystemTime is 100ns scale, as timestamp
st = ctx.getTime()
# TickCount is 1ms scale, as ticks update as if interrupts have maximum period?
# TODO adjust this?
tc = int(it // 10000)
shared_data_addr = 0xfffff78000000000
# write the values back
bts = struct.pack("<QI", tc, tc>>32)
ctx.setMemVal(shared_data_addr + 0x320, bts)
bts = struct.pack("<QIQI", it, it>>32, st, st>>32)
ctx.setMemVal(shared_data_addr + 0x8, bts)
if shared_data_addr + 0x8 <= addr < shared_data_addr + 0x14:
print("Read from InterruptTime")
if shared_data_addr + 0x14 <= addr < shared_data_addr + 0x20:
print("Read from SystemTime")
if shared_data_addr + 0x320 <= addr < shared_data_addr + 0x330:
print("Read from TickCount")
return HookRet.CONT_INS
def initSys(ctx):
# setup global state we track
ctx.active.globstate["poolAllocations"] = [] # see ExAllocatePoolWithTag
ctx.active.globstate["handles"] = {} # number : (object,)
ctx.active.globstate["nexthandle"] = 1
loadNtos(ctx)
registerWinHooks(ctx)
# setup KUSER_SHARED_DATA at 0xFFFFF78000000000
shared_data_addr = 0xfffff78000000000
shared_data_sz = 0x720
ctx.addAnn(shared_data_addr, shared_data_addr + shared_data_sz, "GLOBAL", "_KUSER_SHARED_DATA")
ctx.updateBounds(shared_data_addr, shared_data_addr + shared_data_sz, MEM_READ, False)
#TODO verify tick count/time works how you think
# time is # of 100-nanosecond intervals
# these numbers aren't actually any good because we hook out a looot of functionality?
# but eh, if things don't work then use a volatile symbol hook here
ctx.addHook(shared_data_addr + 0x8, shared_data_addr+0x20, MEM_READ, kuser_time_hook, "Interrupt and System Time hook")
ctx.addHook(shared_data_addr + 0x320, shared_data_addr+0x32c, MEM_READ, kuser_time_hook, "Tick Time hook")
ctx.setMemVal(
shared_data_addr + 0x0,
b'\x00\x00\x00\x00' + # +0x0 .TickCountLowDeprecated
b'\x00\x00\xa0\x0f' + # +0x4 .TickCountMultiplier
# HOOK THIS and use instruction count to add to it
b'O\xcaW[\xd8\x05\x00\x00\xd8\x05\x00\x00' + # +0x8 .InterruptTime
# HOOK THIS and use instruction count to add to it
b'\x19E~M\xe7\x8c\xd6\x01\xe7\x8c\xd6\x01' + # +0x14 .SystemTime
b'\x00\xa0\x11\x87!\x00\x00\x00!\x00\x00\x00' + # +0x20 .TimeZoneBias
b'd\x86' + # +0x2c .ImageNumberLow
b'd\x86' + # +0x2e .ImageNumberHigh
b'C\x00:\x00\\\x00W\x00I\x00N\x00D\x00O\x00' + # +0x30 .NtSystemRoot
b'W\x00S\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00' + # +0x238 .MaxStackTraceDepth
b'\x00\x00\x00\x00' + # +0x23c .CryptoExponent
b'\x02\x00\x00\x00' + # +0x240 .TimeZoneId
b'\x00\x00 \x00' + # +0x244 .LargePageMinimum
b'\x00\x00\x00\x00' + # +0x248 .AitSamplingValue
b'\x00\x00\x00\x00' + # +0x24c .AppCompatFlag
b'I\x00\x00\x00\x00\x00\x00\x00' + # +0x250 .RNGSeedVersion
b'\x00\x00\x00\x00' + # +0x258 .GlobalValidationRunlevel
b'\x1c\x00\x00\x00' + # +0x25c .TimeZoneBiasStamp
b'aJ\x00\x00' + # +0x260 .NtBuildNumber
b'\x01\x00\x00\x00' + # +0x264 .NtProductType
b'\x01' + # +0x268 .ProductTypeIsValid
b'\x00' + # +0x269 .Reserved0
b'\t\x00' + # +0x26a .NativeProcessorArchitecture
b'\n\x00\x00\x00' + # +0x26c .NtMajorVersion
b'\x00\x00\x00\x00' + # +0x270 .NtMinorVersion
#ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x274, 0x4), "kuser_shared_data.ProcessorFeature[0:4]")
#ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x278, 0x8), "kuser_shared_data.ProcessorFeature[4:c]")
#ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x280, 0x20), "kuser_shared_data.ProcessorFeature[c:2c]")
#ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x2a0, 0x10), "kuser_shared_data.ProcessorFeature[2c:3c]")
#ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x2b0, 0x8), "kuser_shared_data.ProcessorFeature[3c:44]")
#ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x2b8, 0x4), "kuser_shared_data.reserved3")
b'\x00\x00\x01\x01\x00\x00\x01\x00\x01\x01\x01\x00\x01\x01\x01\x00' + # +0x274 .ProcessorFeatures
b'\x00\x01\x00\x00\x00\x01\x01\x01\x00\x00\x00\x00\x01\x00\x00\x00' +
b'\x01\x01\x00\x00\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\xff\xff\xfe\x7f' + # +0x2b4 .Reserved1
b'\x00\x00\x00\x80' + # +0x2b8 .Reserved3
b'\x00\x00\x00\x00' + # +0x2bc .TimeSlip
b'\x00\x00\x00\x00' + # +0x2c0 .AlternativeArchitecture
b' \x00\x00\x00' + # +0x2c4 .BootId
b'\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x2c8 .SystemExpirationDate
b'\x10\x03\x00\x00' + # +0x2d0 .SuiteMask
# Yeah, go ahead and keep this one a 0
b'\x00' + # +0x2d4 .KdDebuggerEnabled # Yeah, go ahead and keep this one a 0
b'\n' + # +0x2d5 .Reserved
b'<\x00' + # +0x2d6 .CyclesPerYield
b'\x01\x00\x00\x00' + # +0x2d8 .ActiveConsoleId
b'\x04\x00\x00\x00' + # +0x2dc .DismountCount
b'\x01\x00\x00\x00' # +0x2e0 .ComPlusPackage
)
#TODO hook this properly
ctx.trySymbolizeMemory(shared_data_addr + 0x2e4, 0x4, "kuser_shared_data.LastSystemRITEventTickCount")
#b'\xc9\x85N&' + # +0x2e4 .LastSystemRITEventTickCount
ctx.setMemVal(
shared_data_addr + 0x2e8,
b'\x94\xbb?\x00' + # +0x2e8 .NumberOfPhysicalPages
b'\x00' + # +0x2ec .SafeBootMode
b'\x01' + # +0x2ed .VirtualizationFlags #TODO worth symbolizing?
b'\x00\x00' + # +0x2ee .Reserved12
#TODO should any of these be changed?
# ULONG DbgErrorPortPresent : 1;
# ULONG DbgElevationEnabled : 1; // second bit
# ULONG DbgVirtEnabled : 1; // third bit
# ULONG DbgInstallerDetectEnabled : 1; // fourth bit
# ULONG DbgSystemDllRelocated : 1;
# ULONG DbgDynProcessorEnabled : 1;
# ULONG DbgSEHValidationEnabled : 1;
# ULONG SpareBits : 25;
b'\x0e\x01\x00\x00' + # +0x2f0 .SpareBits
b'\x00\x00\x00\x00' + # +0x2f4 .DataFlagsPad
b'\xc3\x00\x00\x00\x00\x00\x00\x00' + # +0x2f8 .TestRetInstruction
b'\x80\x96\x98\x00\x00\x00\x00\x00' + # +0x300 .QpcFrequency
b'\x00\x00\x00\x00' + # +0x308 .SystemCall
b'\x00\x00\x00\x00' + # +0x30c .UserCetAvailableEnvironments
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x310 .SystemCallPad
# HOOK THIS and use instruction count to add to it
b'\x17\x9es\x02\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x320 .ReservedTickCountOverlay
b'\x00\x00\x00\x00' + # +0x32c .TickCountPad
b'\xd3PB\x1b' + # +0x330 .Cookie
b'\x00\x00\x00\x00' + # +0x334 .CookiePad
b'\xbc\x1d\x00\x00\x00\x00\x00\x00' + # +0x338 .ConsoleSessionForegroundProcessId
#TODO hook this?
b'\xa2{H\x1a\x00\x00\x00\x00' + # +0x340 .TimeUpdateLock
b'-\x83\x87[\xd8\x05\x00\x00' + # +0x348 .BaselineSystemTimeQpc
b'-\x83\x87[\xd8\x05\x00\x00' + # +0x350 .BaselineInterruptTimeQpc
b'\x00\x00\x00\x00\x00\x00\x00\x80' + # +0x358 .QpcSystemTimeIncrement
b'\x00\x00\x00\x00\x00\x00\x00\x80' + # +0x360 .QpcInterruptTimeIncrement
b'\x01' + # +0x368 .QpcSystemTimeIncrementShift
b'\x01' + # +0x369 .QpcInterruptTimeIncrementShift
b'\x18\x00' + # +0x36a .UnparkedProcessorCount
b'\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x36c .EnclaveFeatureMask
b'\x03\x00\x00\x00' + # +0x37c .TelemetryCoverageRound
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x380 .UserModeGlobalLogger
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00' + # +0x3a0 .ImageFileExecutionOptions
b'\x01\x00\x00\x00' + # +0x3a4 .LangGenerationCount
b'\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x3a8 .Reserved4
b'\x17\xfc\x9eU\xd8\x03\x00\x00' + # +0x3b0 .InterruptTimeBias
b'\xcd"\x15G\xd8\x03\x00\x00' + # +0x3b8 .QpcBias
b'\x18\x00\x00\x00' + # +0x3c0 .ActiveProcessorCount
b'\x01' + # +0x3c4 .ActiveGroupCount
b'\x00' + # +0x3c5 .Reserved9
b'\x83' + # +0x3c6 .QpcBypassEnabled
b'\x00' + # +0x3c7 .QpcShift
b'\x9a,\x17\xcdq\x8c\xd6\x01' + # +0x3c8 .TimeZoneBiasEffectiveStart
b'\x000\x9d;\x14\xb0\xd6\x01' + # +0x3d0 .TimeZoneBiasEffectiveEnd
b'\x07\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00' + # +0x3d8 .XState
b'@\x03\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xa0\x00\x00\x00' +
b'\xa0\x00\x00\x00\x00\x01\x00\x00@\x02\x00\x00\x00\x01\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00@\x03\x00\x00\xa0\x00\x00\x00' +
b'\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x710 .FeatureConfigurationChangeStamp
b'\x00\x00\x00\x00' # +0x71c .Spare
)
# setup KPCR and KPRCB
#TODO
```
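All of the hooks above share the `(hook, ctx, addr, sz, op, provider)` signature and are installed with `setApiHandler`. Below is a minimal sketch of a new hook following that convention; the package import path is an assumption, and `ZwQueryInformationProcess` is only an illustrative target:
```python
from dobby.dobby import *        # assumed package layout, mirroring winsys.py's imports
from dobby.dobby_const import *

def ZwQueryInformationProcess_hook(hook, ctx, addr, sz, op, provider):
    # hypothetical hook target, shown only to illustrate the convention
    handle = ctx.getRegVal(DB_X86_R_RCX)   # first argument, x64 calling convention
    print(f"ZwQueryInformationProcess on handle {hex(handle)}")
    ctx.doRet(0)                           # pretend STATUS_SUCCESS
    return HookRet.DONE_INS

# installed like the other hooks:
# ctx.setApiHandler("ZwQueryInformationProcess", ZwQueryInformationProcess_hook, "ignore")
```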
#### File: dobby2/other_tools/side_utils.py
```python
def parse_dt(dtstr):
out = []
last_lvl = [0]
#TODO handle unions when we handle nested types
for l in dtstr.split('\n'):
if not l.strip().startswith("+0x"):
#TODO
continue
if not l.startswith(' '):
print("line =", l)
#TODO
raise NotImplementedError("Need to implement nested type dt parsing")
ll = l.split()
if ll[0].startswith('+0x'):
out.append((int(ll[0][3:],16), ll[1]))
if out[0][0] != 0:
raise TypeError("dtstr does not have a type that starts at zero?")
return out
def parse_db(dbstr):
out = b""
for l in dbstr.split('\n'):
out += bytes.fromhex(l.split(" ")[1].replace('-', ''))
return out
def gen_sym_type(dtstr, typesize, addr, name):
#TODO allow addr to be a variable name
dt = parse_dt(dtstr)
out = ""
for i in range(len(dt)):
e = typesize
if i != (len(dt)-1):
e = dt[i+1][0]
sz = e - dt[i][0]
# hopefully it fits in a MemoryAccess size
out += f"ctx.symbolizeMemory(MemoryAccess( {hex(addr + dt[i][0])} , {hex(sz)} ), \"{name + '.' + dt[i][1]}\")\n"
return out
def gen_mem_init(dtstr, dbstr, addr, name=""):
#TODO allow addr to be a variable name
dt = parse_dt(dtstr)
db = parse_db(dbstr)
typesize = len(db)
out = ""
for i in range(len(dt)):
e = typesize
if i != (len(dt)-1):
e = dt[i+1][0]
s = dt[i][0]
out += "ctx.api.setConcreteMemoryAreaValue(\n"
out += f" {hex(addr + dt[i][0])}, # {name + '.' + dt[i][1]}\n"
out += f" bytes.fromhex(\"{db[s:e].hex()}\")\n"
out += ")\n"
return out
def gen_commented_memdmp(dtstr, dbstr):
dt = parse_dt(dtstr)
db = parse_db(dbstr)
# cut up where we have
out = ""
for d in dt[::-1]:
b = db[d[0]:]
bl = [ b[i:i+0x10] for i in range(0x0, len(b), 0x10) ]
first = True
line = ""
for bi in bl:
line += str(bi)
line += " + "
if first:
line += " # +" + hex(d[0]) + " ." + d[1]
line += "\n"
first = False
db = db[:d[0]]
out = line + out
out += "b\"\"\n"
return out
import ctypes
import struct
def QuerySysInfo(infoclass=0x4d):
retsz = ctypes.c_ulong(0)
retszptr = ctypes.pointer(retsz)
ctypes.windll.ntdll.NtQuerySystemInformation(infoclass, 0, 0, retszptr)
buf = (ctypes.c_byte * retsz.value)()
ctypes.windll.ntdll.NtQuerySystemInformation(infoclass, buf, len(buf), retszptr)
return bytes(buf)
def quickhex(chunk):
spced = ' '.join([chunk[i:i+1].hex() for i in range(len(chunk))])
fourd = ' '.join([spced[i:i+(4*3)] for i in range(0, len(spced), (4*3))])
sxtnd = '\n'.join([fourd[i:i+(((4*3)+2)*4)] for i in range(0, len(fourd), (((4*3)+2)*4))])
print(sxtnd)
def parseModInfoEx(infobuf):
#TODO
fmt = "<HHIQQQIIHHHH256sIIQ"
off = 0
modinfo = []
while True:
nextoff = struct.unpack("<H", infobuf[off:off+2])[0]
if nextoff == 0:
break
vals = struct.unpack(fmt, infobuf[off:off+struct.calcsize(fmt)])
(
nextoff,
pad, pad,
section,
mapbase,
imgbase,
imgsz,
flags,
loadorder,
initorder,
loadcount,
nameoff,
pathname,
chksum,
timedate,
defbase,
) = vals
pend = pathname.find(b'\x00')
if pend != -1:
pathname = pathname[:pend]
name = pathname[nameoff:]
modinfo.append({
"Section": section,
"MappedBase": mapbase,
"ImageBase": imgbase,
"ImageSize": imgsz,
"Flags": flags,
"LoadOrderIndex": loadorder,
"InitOrderIndex": initorder,
"LoadCount": loadcount,
"Name": name,
"Path": pathname,
"ImageChecksum": chksum,
"TimeDataStamp": timedate,
"DefaultBase": defbase,
})
off += nextoff
return modinfo
``` |
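`parse_dt` above consumes windbg `dt` style output, where each member line begins with `+0x<offset> <name>`. A tiny illustrative run (assuming `parse_dt` from this module is in scope):
```python
dt_sample = """\
   +0x000 Length           : Uint2B
   +0x002 MaximumLength    : Uint2B
   +0x008 Buffer           : Ptr64 Uint2B
"""
print(parse_dt(dt_sample))  # [(0, 'Length'), (2, 'MaximumLength'), (8, 'Buffer')]
```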
{
"source": "Jordan9675/KBirthdayBot",
"score": 3
} |
#### File: Jordan9675/KBirthdayBot/main.py
```python
import logging
from birthday_bot.birthday import (generate_birthday_message,
get_todays_birthdays)
from birthday_bot.google_images import (DOWNLOADER_OUTPUT_DIR,
download_idol_picture)
from birthday_bot.twitter_bot import TwitterBot
from birthday_bot.utils import delete_folder
logging.basicConfig(level=logging.INFO)
bot = TwitterBot()
def main() -> None:
if not bot.has_posted_today():
birthdays = get_todays_birthdays()
for birthday in birthdays:
idol_name, idol_group = birthday["idolName"], birthday["groupName"]
message = generate_birthday_message(idol_name, idol_group)
picture_path = download_idol_picture(idol_name, idol_group)
bot.tweet_with_picture(message, picture_path)
delete_folder(DOWNLOADER_OUTPUT_DIR)
else:
logging.info("Nothing posted because already posted today")
if __name__ == "__main__":
main()
``` |
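`main()` only touches two keys of each record returned by `get_todays_birthdays()`, so the minimal shape it relies on looks like this sketch (the records are made up):
```python
# made-up records matching the two keys main() uses
example_birthdays = [
    {"idolName": "Jisoo", "groupName": "BLACKPINK"},
    {"idolName": "Chaeyoung", "groupName": "TWICE"},
]
for b in example_birthdays:
    print(b["idolName"], "-", b["groupName"])
```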
{
"source": "JordanAceto/quad_ADSR_board",
"score": 3
} |
#### File: firmware/utils/lookup_table_gen.py
```python
import argparse
import numpy as np
from matplotlib import pyplot as plt
# The output file paths for the lookup table header/source files to generate.
HEADER_OUTPUT_FILE_PATH = "../lib/lookup_tables.h"
SOURCE_OUTPUT_FILE_PATH = "../lib/lookup_tables.c"
# the lookup table size, must match the ADSR_LOOK_UP_TABLE_TABLE_SIZE and
# NUM_INDEX_BITS_IN_ACCUMULATOR definitions in adsr.c, where ADSR_LOOK_UP_TABLE_TABLE_SIZE
# is 2^NUM_INDEX_BITS_IN_ACCUMULATOR
LUT_SIZE = 2**10
# arguments decide whether to plot the curves or write the c files, and allow the
# user to modify the shape of the curves
parser = argparse.ArgumentParser()
parser.add_argument(
'action',
help='the action to do, either graphically plot the LUT, or generate and write the c files',
choices=['plot', 'write']
)
parser.add_argument(
'attack_target',
help='the target value for the attack curve, useful range: [1..5]',
nargs='?',
default=3.0,
type=float
)
parser.add_argument(
'num_time_constants',
help='the number of time constants to extend the attack and decay curves, useful range: [3..10]',
nargs='?',
default=4.0,
type=float
)
args = parser.parse_args()
# the size of the optional plotted figure
plt.rcParams['figure.figsize'] = (8.0, 6.0)
# the target for the attack curve, flattens out the attack curve like an analog ADSR
# which sets a target for the attack curve higher than the trip point
# Example: the attack target is often 15 volts, but the comparator trips at 10 volts
# bigger number means flatter attack curve, adjust to taste
ATTACK_TARGET = args.attack_target
# how far out to take the curves in the LUT, too short and the curves will not
# have time to decay gracefully, too long and all the action will be bunched up
# in the front of the LUT
NUM_TIME_CONSTANTS = args.num_time_constants
# the linear input to transform into the attack and decay curves
X = np.linspace(0, NUM_TIME_CONSTANTS, LUT_SIZE)
# generate the attack curve, range [0, 1], increasing truncated rc curve
y_a = 1 - np.exp(-X / ATTACK_TARGET)
y_a = y_a / y_a.max() # range [0, 1]
# generate the decay/release curve, range [0, 1], decreasing rc curve
y_d = np.exp(-X)
y_d = y_d - y_d.min() # end at zero
y_d = y_d / y_d.max() # range [0, 1]
if (args.action == 'plot'): # graphically plot the curves
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(np.linspace(0, LUT_SIZE-1, LUT_SIZE), y_a, label="attack")
ax.plot(np.linspace(0, LUT_SIZE-1, LUT_SIZE), y_d, label="decay")
plt.suptitle(f"Attack and decay lookup tables with {LUT_SIZE} points")
plt.title(f'attack target: {ATTACK_TARGET}\nnum time constants: {NUM_TIME_CONSTANTS}')
plt.xlabel("LUT index")
plt.ylabel("value")
plt.legend()
plt.show()
elif (args.action == 'write'): # write the c-header file
print(f"generating {HEADER_OUTPUT_FILE_PATH} file...")
ATTACK_TABLE_TYPE = f'const float ADSR_ATTACK_TABLE[{LUT_SIZE}]'
DECAY_TABLE_TYPE = f'const float ADSR_DECAY_TABLE[{LUT_SIZE}]'
h_file_content = '/* FILE AUTOMATICALLY GENERATED BY: /utils/lookup_table_gen.py */\n\n\
#ifndef LOOKUP_TABLES_H_INCLUDED\n\
#define LOOKUP_TABLES_H_INCLUDED\n\n\
// the attack curve lookup table\n\
extern ' + ATTACK_TABLE_TYPE + ';\n\n\
// the decay curve lookup table, used for decay and release curves\n\
extern ' + DECAY_TABLE_TYPE + ';\n\n\
#endif'
with open(HEADER_OUTPUT_FILE_PATH, 'w') as writer:
writer.write(h_file_content)
# write the c-source file
print(f"generating {SOURCE_OUTPUT_FILE_PATH} file...")
source_beginning = f'/* FILE AUTOMATICALLY GENERATED BY: /utils/lookup_table_gen.py */\n\n#include "lookup_tables.h" \n\n'
end_of_lut = '\n};\n'
with open(SOURCE_OUTPUT_FILE_PATH, 'w') as writer:
writer.write(source_beginning)
writer.write(ATTACK_TABLE_TYPE + ' = {\n')
writer.writelines("\t%s,\n" % y for y in y_a)
writer.write(end_of_lut + '\n')
writer.write(DECAY_TABLE_TYPE + ' = {\n')
writer.writelines("\t%s,\n" % y for y in y_d)
writer.write(end_of_lut)
``` |
{
"source": "Jordan-AG/Still-Clock",
"score": 4
} |
#### File: Jordan-AG/Still-Clock/DisplayClock.py
```python
from tkinter import *
from StillClock import StillClock
class DisplayClock:
def __init__(self):
window = Tk()
window.title("Change Clock Time")
self.clock = StillClock(window)
self.clock.pack()
frame = Frame(window)
frame.pack()
Label(frame, text="Hour: ").pack(side=LEFT)
self.hour = IntVar()
self.hour.set(self.clock.getHour())
Entry(frame, textvariable=self.hour,
width=2).pack(side=LEFT)
Label(frame, text="Minute: ").pack(side=LEFT)
self.minute = IntVar()
self.minute.set(self.clock.getMinute())
Entry(frame, textvariable=self.minute,
width=2).pack(side=LEFT)
Label(frame, text="Second: ").pack(side=LEFT)
self.second = IntVar()
self.second.set(self.clock.getSecond())
Entry(frame, textvariable=self.second,
width=2).pack(side=LEFT)
Button(frame, text="Set New Time",
command=self.setNewTime).pack(side=LEFT)
window.mainloop()
def setNewTime(self):
self.clock.setHour(self.hour.get())
self.clock.setMinute(self.minute.get())
self.clock.setSecond(self.second.get())
DisplayClock()
``` |
{
"source": "jordanbean-msft/durable-func-and-aci",
"score": 2
} |
#### File: orchestrator/Compute/__init__.py
```python
import os, json
from azure.storage.blob import (
BlobServiceClient
)
from azure.storage.queue import (
QueueClient
)
def main(input) -> str:
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
queue_name = os.getenv("AZURE_STORAGE_QUEUE_NAME")
input_blob_container_name = os.getenv("AZURE_STORAGE_INPUT_BLOB_CONTAINER_NAME")
queue_client = QueueClient.from_connection_string(connection_string, queue_name)
blob_service_client = BlobServiceClient.from_connection_string(connection_string)
path_to_output_file = f"{input.instance_id}/{input.input_id}.json"
output_blob_client = blob_service_client.get_blob_client(container=input_blob_container_name, blob=path_to_output_file)
output_json = json.dumps(input, default=lambda o: o.__dict__, sort_keys=True, indent=4)
output_blob_client.upload_blob(output_json)
queue_message = {
"input_id": input.input_id,
"instance_id": input.instance_id,
"path_to_data": path_to_output_file
}
queue_client.send_message(json.dumps(queue_message, indent=4))
return "Complete"
``` |
{
"source": "jordanbegg/benchmark",
"score": 2
} |
#### File: benchmark/bootlets/boots.py
```python
from flask import url_for, request
from flask_bootstrap import is_hidden_field_filter
from . import html
from .boots_base import Boot
from .funcs import try_draw
class Container(Boot):
defaults = {"_inline": False}
_block = "{content}"
def build(self):
if self.get("_inline"):
joinstr = " "
else:
joinstr = "\n"
return joinstr.join([try_draw(item) for item in self])
class Alert(Boot):
_class = "alert"
defaults = {"_context": "primary", "role": "alert"}
def build_classes(self):
return f'alert-{self.get("_context")}'
def build(self):
return html.Div(*self.args, **self.get_kwargs())
class AlertHeading(Boot):
_class = "alert-heading"
defaults = {
"_size": 4,
}
def build(self):
return html.H(*self.args, **self.get_kwargs())
class AlertDismissButton(Boot):
_class = "close"
defaults = {"type": "button", "data-dismiss": "alert", "aria-label": "Close"}
def build(self):
return html.Button(
html.Span("×", aria_hidden="true"), **self.get_kwargs()
)
class Badge(Boot):
_class = "badge"
defaults = {"_context": "primary"}
def get_class(self):
return f'{self._class} badge-{self.get("_context")}'
def build(self):
return html.Span(*self.args, **self.get_kwargs())
class BadgePill(Badge):
_class = "badge badge-pill"
class LinkBadge(Boot):
_class = "badge"
defaults = {
"_context": "primary",
"href": "#",
}
def build_class(self):
return f'badge-{self.get("_context")}'
def build(self):
return html.A(*self.args, **self.get_kwargs())
class BreadcrumbItem(Boot):
_class = "breadcrumb-item"
defaults = {"_active": False}
def build_classes(self):
if self.get("_active"):
return "active"
return ""
def build(self):
return html.Li(*self.args, **self.get_kwargs())
class Breadcrumb(Boot):
_class = "breadcrumb"
Li = BreadcrumbItem
Ol = html.Ol
Nav = html.Nav(aria_label="breadcrumb")
def build(self):
items = []
for arg in self.args[:-1]:
items.append(self.Li(arg))
items.append(self.Li(self.args[-1], _active=True, aria_current="page"))
return self.Nav(self.Ol(*items, **self.get_kwargs()))
class Button(Boot):
defaults = {
"_context": "primary",
"_size": "md",
"_outline": False,
"_block": False,
"_disabled": False,
"type": "button",
}
sizes = {"sm": "btn-sm", "md": "", "lg": "btn-lg"}
def get_class(self):
s = "btn"
size_class = self.sizes.get(self.get("_size"))
if size_class:
s += " " + size_class
s += " btn-"
if self.get("_outline"):
s += "outline-"
s += self.get("_context")
if self.get("_block"):
s += " btn-block"
if self.get("_disabled"):
s += " disabled"
return s
def build(self):
return html.Button(*self.args, **self.get_kwargs())
class ButtonLink(Boot):
defaults = {
"_context": "primary",
"_size": "md",
"_outline": False,
"_block": False,
"_disabled": False,
"type": "button",
}
sizes = {"sm": "btn-sm", "md": "", "lg": "btn-lg"}
def get_class(self):
s = "btn"
size_class = self.sizes.get(self.get("_size"))
if size_class:
s += " " + size_class
s += " btn-"
if self.get("_outline"):
s += "outline-"
s += self.get("_context")
if self.get("_block"):
s += " btn-block"
if self.get("_disabled"):
s += " disabled"
return s
def build(self):
return html.A(*self.args, **self.get_kwargs())
class ButtonGroup(Boot):
defaults = {
"_vertical": False,
"role": "group",
"size": "md",
"aria-label": "myButtonGroup",
}
sizes = {"sm": "btn-group-sm", "md": "", "lg": "btn-group-lg"}
def get_class(self):
s = "btn-group"
if self.get("_vertical"):
s += "-vertical"
size_class = self.sizes.get(self.get("_size"))
if size_class:
s += " " + size_class
return s
class Card(Boot):
_class = "card mb-3"
class CardBody(Boot):
_class = "card-body"
class CardHeader(Boot):
_class = "card-header"
defaults = {"_size": 4}
def build(self):
return html.H(*self.args, _size=self.get("_size"), **self.get_kwargs())
class CardFooter(Boot):
_class = "card-footer"
class CardTitle(Boot):
_class = "card-title"
defaults = {"_size": 5}
def build(self):
return html.H(*self.args, _size=self.get("_size"), **self.get_kwargs())
class CardText(Boot):
_class = "card-text"
def build(self):
return html.P(*self.args, **self.get_kwargs())
class CardImage(Boot):
defaults = {
"_location": "top",
"src": "#",
"alt": "myCardImage",
}
def build_class(self):
return f'card-img-{self.get("_location")}'
def build(self):
return html.Img(**self.get_kwargs())
class DescriptionList(Boot):
defaults = {"_column_widths": (3, 9)}
def build(self):
if len(self.args) == 1:
arg = self.args[0]
if isinstance(arg, dict):
iterable = arg.items()
else:
iterable = arg
else:
iterable = self.args
lhs_col_size = self.get("_column_widths")[0]
rhs_col_size = self.get("_column_widths")[1]
return Container(
html.Dl(class_="row")(
*[
Container(
html.Dt(class_=f"col-sm-{lhs_col_size}")(dt),
html.Dd(class_=f"col-sm-{rhs_col_size}")(dd),
)
for dt, dd in iterable
]
)
)
class ListGroup(Boot):
_class = "list-group"
Li = html.Li
defaults = {"_flush": False, "_li_class": "list-group-item"}
def build_classes(self):
s = ""
if self.get("_flush"):
s += " list-group-flush"
return s
def build(self):
return html.Ul(
*[self.Li(arg, class_=self.get("_li_class")) for arg in self.args],
**self.get_kwargs(),
)
class Accordian(Boot):
_class = "accordian"
defaults = {"id": "myAccordian"}
class Dropdown(Boot):
_class = "dropdown"
class DropdownItem(Boot):
_class = "dropdown-item"
defaults = {
"href": "#",
}
def build(self):
return html.A(*self.args, **self.get_kwargs())
class DropdownMenu(Boot):
_class = "dropdown-menu"
defaults = {
"aria-labelledby": "myDropdownMenu",
}
class DropdownDivider(Boot):
_class = "dropdown-divider"
class DropdownButton(Boot):
_class = "btn"
defaults = {
"_context": "primary",
"data-toggle": "dropdown",
"aria-haspopup": "true",
"aria-expanded": "false",
}
def build_class(self):
return "btn-" + self.get("_context") + "dropdown-toggle"
def build(self):
return html.Button(*self.args, **self.get_kwargs())
class Table(Boot):
_class = "table table-hover"
defaults = {
"_headers": [],
"_rows": [],
"_hover": True,
}
def build(self):
headers = html.THead(
html.Tr(*[html.Th(i, scope="col") for i in self.get("_headers")])
)
rows = html.TBody(
*[html.Tr(*[html.Td(i) for i in row]) for row in self.get("_rows")]
)
return html.Table(headers, rows, **self.get_kwargs())
class FormField(Boot):
defaults = {
"_form_type": "basic",
"_button_map": {},
}
def build(self):
field = self.args[0]
if field.type == "SubmitField":
btn_cls = self.get("_button_map").get(field.name, "primary")
return field(class_=f"btn btn-{btn_cls}")
# return html.Button(class_=f'btn btn-{btn_cls}')
elif field.type == "RadioField":
return Container(
*[html.Div(class_="form-check")(item(), item.label()) for item in field]
)
elif field.type == "FormField":
return html.FieldSet()(
html.Legend()(field.label),
*[
FormField(
item,
_form_type=self.get("_form_type"),
_button_map=self.get("_button_map"),
)
for item in field
if not is_hidden_field_filter(item)
],
)
elif field.type == "BooleanField":
return html.Div(class_="form-group form-check")(
field(class_="form-check-input"), field.label(class_="form-check-label")
)
elif field.type == "FileField" or field.type == "MultiplFileField":
return html.Div(class_="form-group")(
field.label(), field(class_="form-control-file")
)
else:
if is_hidden_field_filter(field):
return html.Div(class_="form-group")(field())
return html.Div(class_="form-group")(
field.label(), field(class_="form-control")
)
class QuickForm(Boot):
defaults = {
"_action": "",
"_method": "post",
"_extra_classes": None,
"_role": "form",
"_form_type": "basic",
"_columns": ("lg", 2, 10),
"_enctype": None,
"_button_map": {},
"_id": "",
"_novalidate": False,
"_render_kw": {},
}
def build(self):
form = self.args[0]
return html.Form(
Container(
*[
FormField(
field,
_form_type=self.get("_form_type"),
_columns=self.get("_columns"),
_button_map=self.get("_button_map"),
)
for field in form
]
)
)
class Modal(Boot):
defaults = {"_id": "modal_id", "_title": "Modal", "_tabindex": -1}
def build(self):
return html.Div(
class_="modal fade",
id=f'Modal_{self.get("_id")}',
tabindex=self.get("_tabindex"),
role="dialog",
aria_labelledby=f'ModalLabel_{self.get("_id")}',
aria_hidden="true",
)(
html.Div(class_="modal-dialog modal-dialog-centered", role="document")(
html.Div(class_="modal-content")(
html.Div(class_="modal-header")(
html.H(
size=5,
class_="modal-title",
id=f'ModalLabel_{self.get("_id")}',
)(self.get("_title")),
html.Button(
type="button",
class_="close",
data_bs_dismiss="modal",
aria_label="Close",
)(html.Span(aria_hidden="true")("×")),
),
html.Div(class_="modal-body")(*self.args),
self.get_footer(),
)
)
)
def get_footer(self):
if self.get("_footer"):
return html.Div(class_="modal-footer")(self.get("_footer"))
return ""
class ModalButton(Boot):
defaults = {
"_id": "modal_id",
"_text": "Submit",
"_context": "primary",
}
def build(self):
return html.Button(
type="button",
class_=f'btn btn-{self.get("_context")}{self.get_class()}',
data_bs_toggle="modal",
data_bs_target=f'#Modal_{self.get("_id")}',
)(self.get("_text"))
class ModalLink(Boot):
defaults = {
"_id": "modal_id",
"_context": "primary",
}
def build(self):
return html.A(
type="button",
data_toggle="modal",
data_target=f'#Modal_{self.get("_id")}',
**self.get_kwargs(),
)(*self.args)
class Pagination(Boot):
def init(self, *args, **kwargs):
self.pagination = args[0]
self.endpoint = args[1]
defaults = {
"_label": "Pagination",
"_request_param": "page",
"_url_kwargs": {},
"_fragment": "",
}
def get_prev_endpoint(self):
if self.pagination.has_prev:
return (
url_for(
self.endpoint,
**{
**request.args,
**{self.get("_request_param"): self.pagination.prev_num},
**self.get("_url_kwargs"),
},
)
+ self.get("_fragment")
)
return "#"
def get_next_endpoint(self):
if self.pagination.has_next:
return (
url_for(
self.endpoint,
**{
**request.args,
**{self.get("_request_param"): self.pagination.next_num},
**self.get("_url_kwargs"),
},
)
+ self.get("_fragment")
)
return "#"
def get_pages(self):
page_list_items = []
for page in self.pagination.iter_pages():
if page is not None:
if page == self.pagination.page:
page_list_items.append(
html.Li(class_="page-item active")(
html.Span(page, class_="page-link")
)
)
else:
page_list_items.append(
html.Li(class_="page-item")(
html.A(
page,
class_="page-link",
href=url_for(
self.endpoint,
**{
**request.args,
**{self.get("_request_param"): page},
**self.get("_url_kwargs"),
},
),
)
)
)
prev_page_list_item_class = "page-item"
if not self.pagination.has_prev:
prev_page_list_item_class += " disabled"
prev_page_list_item = html.Li(class_=prev_page_list_item_class)(
html.A("«", class_="page-link", href=self.get_prev_endpoint())
)
next_page_list_item_class = "page-item"
if not self.pagination.has_next:
next_page_list_item_class += " disabled"
next_page_list_item = html.Li(class_=next_page_list_item_class)(
html.A("»", class_="page-link", href=self.get_next_endpoint())
)
return [prev_page_list_item, *page_list_items, next_page_list_item]
def build(self):
return html.Nav(aria_label=self.get("_label"))(
html.Ul(class_="pagination justify-content-center")(*self.get_pages())
)
``` |
{
"source": "jordanbegg/Schengen-Calculator",
"score": 3
} |
#### File: app/main/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, DateField, HiddenField
from wtforms.validators import DataRequired, ValidationError
from app.models import Trip
from flask_login import current_user
class TripForm(FlaskForm):
start = DateField("Start Date", validators=[DataRequired()])
end = DateField("End Date", validators=[DataRequired()])
submit_add_trip = SubmitField("Submit")
def validate_end(self, end):
if end.data < self.start.data:
raise ValidationError("End date cannot be before start date.")
trip = Trip.query.filter_by(
start=self.start.data, end=end.data, traveller=current_user
).first()
if trip:
raise ValidationError("Trip already exists!")
class EditProfileForm(FlaskForm):
first_name = StringField("First Name", validators=[DataRequired()])
surname = StringField("Surname", validators=[DataRequired()])
submit_edit_profile = SubmitField("Submit")
class EndDateForm(FlaskForm):
end = DateField("Calculation End Date", validators=[DataRequired()])
submit_update_end_date = SubmitField("Submit")
class DeleteTripForm(FlaskForm):
trip_id = HiddenField()
submit_delete_trip = SubmitField("Delete")
```
#### File: app/main/panels.py
```python
import bootlets.templates as bl
from flask import request
from app.main.forms import EditProfileForm
class EditProfilePanel:
def __init__(self, user):
self.user = user
self.edit_profile_form = EditProfileForm(request.form)
def build(self):
return bl.Container(bl.H("Edit Profile"), bl.Form(self.edit_profile_form))
def draw(self):
return self.build().draw()
``` |
{
"source": "jordan-benjamin/buf",
"score": 4
} |
#### File: buf/commands/chemical.py
```python
import os
import tabulate
from buf import user_input, error_messages, libraries
from typing import Sequence
instructions = """buf chemical:
This subcommand allows you to access and modify your chemical library, i.e. your personal \
list of chemicals and their molar masses.
Before making buffers that specify a chemical's concentration in molar, that chemical's molar \
mass must first be added to your chemical library. To do this, use 'buf -a <molar_mass> <chemical_names>...', where \
the chemical's molar mass is in g/mol. For example, after adding NaCl to your library with ``buf add -a 58.44 NaCl``, \
you can then ``buf make 2L 1M NaCl`` to calculate the mass of salt you would need to add to a 2L solution to raise the \
salt concentration to 1M (see 'buf help make' for more information on performing solution calculations).
Chemicals can have multiple names, which can be listed upon addition to your library. For example, using \
'buf chemical -a 58.44 NaCl salt' allows you to use either the name 'salt' or 'NaCl' when making buffers (i.e. 'buf make 2L 1M NaCl' \
is equivalent to 'buf make 2L 1M salt', since both expressions refer to the same molar mass).
To add additional names to an existing entry in your chemical library (also known as 'nicknaming' the chemical), use \
'buf chemical -n <existing_chemical_name> <nicknames>...'. For example, if you added NaCl to your library with 'buf chemical \
-a 58.44 NaCl', and then nicknamed the chemical with 'buf chemical -n NaCl salt table_salt', you could use any of 'NaCl', 'salt', \
or 'table_salt' to refer to the same molar mass. Note that using 'buf chemical -a 58.44 NaCl table_salt salt' is equivalent to using \
'buf chemical -a 58.44 NaCl' followed by 'buf chemical -n NaCl table_salt salt'.
Another way to add chemicals to your library is by specifying a list of them in a text file. This file should contain one chemical \
per line, where the first word on each line specifies the chemical's molar mass, followed by the list of the chemical's names. Spaces should \
separate each item on a line. For example, if a file 'chemicals.txt' contains the following:
58.44 NaCl salt
68.08 Imidazole imi
74.55 KCl
Using 'buf chemical -a chemicals.txt' would add these three chemicals to your library.
To delete a chemical, use 'buf chemical -d <chemical_name>'. By default, chemical deletion is shallow/incomplete; the same chemical \
can still be accessed through its other names after one name has been deleted. For example, if 'buf chemical -a 58.44 NaCl salt' was used to \
add a chemical to our library, and then the name 'NaCl' was deleted with 'buf chemical -d NaCl', the name 'salt' would still be bound to a molar mass
of 58.44 g/mol in your chemical library. To delete a chemical entirely (i.e. delete all its names), use the '--complete' option. Using the example \
above, 'buf chemical -d NaCl --complete' would remove both the names 'NaCl' and 'salt' from our chemical library. To skip the program \
asking you to confirm your decision, use the '--confirm' option.
To view information about a specific chemical (its molar mass and additional names), use 'buf chemical <chemical_name>'. To view your entire \
chemical library, use 'buf chemical'.
"""
chemical_library_file = libraries.fetch_library_file_path("chemicals.txt")
def chemical(options : dict):
"""Parses dictionary of command line options and calls appropriate functions."""
if options["-a"]:
if options["<file_name>"]:
add_chemicals_from_file(options["<file_name>"])
else:
add_single_chemical(options["<molar_mass>"], options["<chemical_names>"])
elif options["-d"]:
delete_chemical(options["<chemical_name>"], complete_deletion=options["--complete"], prompt_for_confirmation= not options["--confirm"])
elif options["-n"]:
nickname_chemical(options["<existing_chemical_name>"], options["<nicknames>"])
elif options["<chemical_name>"]:
display_chemical_information(options["<chemical_name>"])
else:
display_chemical_library()
# --------------------------------------------------------------------------------
# --------------------------CHEMICAL DEFINITION AND CREATION----------------------
# --------------------------------------------------------------------------------
class Chemical:
"""A record that maps chemical names to a molar mass."""
def __init__(self, molar_mass: float, names: Sequence[str]):
self.molar_mass = molar_mass
self.names = names
def __repr__(self):
string = str(self.molar_mass)
for name in self.names:
string += " " + name
return string
def __eq__(self, other):
return self.molar_mass == other.molar_mass and set(self.names) == set(other.names)
def make_safe_chemical(molar_mass : str, names : list, chemical_library: dict = None):
"""Type checks user input, safely making a Chemical if input is valid."""
if chemical_library == None:
chemical_library = load_chemicals()
for name in names:
if name in chemical_library:
error_messages.chemical_already_exists(name)
if " " in name:
error_messages.spaces_in_chemical_name(name)
try:
molar_mass = float(molar_mass)
except:
error_messages.non_number_molar_mass(molar_mass)
if molar_mass <= 0:
error_messages.non_positive_molar_mass(molar_mass)
return Chemical(molar_mass, names)
# --------------------------------------------------------------------------------
# --------------------------------ADDING CHEMICALS--------------------------------
# --------------------------------------------------------------------------------
def add_single_chemical(molar_mass: str, names: Sequence[str]):
"""Adds single chemical to library."""
new_chemical = make_safe_chemical(molar_mass, names)
with open(chemical_library_file, "a") as file:
file.write(str(new_chemical) + "\n")
def add_chemicals_from_file(filename : str):
"""Parses specified file, adding a chemical to the library for each line in the file.
Each line in the file should first contain the chemicals's molar mass, followed by a list of its names.
All words should be separated by spaces. Example file:
58.44 NaCl table_salt sodium_chloride
74.55 KCl potassium_chloride
"""
if os.path.isfile(filename) == False:
error_messages.file_not_found(filename)
try:
with open(filename, "r") as file:
lines = file.readlines()
except:
error_messages.file_read_error(filename)
existing_chemical_library = load_chemicals()
new_chemical_names = []
new_chemical_objects = []
for line_number, line in enumerate(lines):
try:
words = line.split()
if len(words) == 0:
continue
elif len(words) < 2:
error_messages.line_too_short_in_chemical_file(line_number)
molar_mass = words[0]
names = words[1:]
new_chemical = make_safe_chemical(molar_mass, names, chemical_library=existing_chemical_library)
for name in names:
if name in new_chemical_names:
error_messages.duplicate_file_entry(name)
new_chemical_names.append(name)
new_chemical_objects.append(new_chemical)
except:
error_messages.add_from_file_termination(line_number, erroneous_line=line.strip("\n"), upper_case_data_type="Chemicals")
with open(chemical_library_file, "a") as file:
for new_chemical in new_chemical_objects:
file.write(str(new_chemical) + "\n")
print("Added the following chemicals to your library:", *new_chemical_names)
# --------------------------------------------------------------------------------
# -------------------------NICKNAMING/DELETING CHEMICALS--------------------------
# --------------------------------------------------------------------------------
def nickname_chemical(existing_chemical_name: str, new_names: Sequence[str]):
"""Adds additional names to an existing chemical in the library."""
chemical_library = load_chemicals()
if existing_chemical_name not in chemical_library:
error_messages.chemical_not_found(existing_chemical_name)
for new_name in new_names:
if new_name in chemical_library:
error_messages.chemical_already_exists(new_name)
if " " in new_name:
error_messages.spaces_in_chemical_name(new_name)
chemical_object = chemical_library[existing_chemical_name]
chemical_object.names += new_names
save_chemical_library(chemical_library)
def delete_chemical(chemical_name: str, complete_deletion: bool = False, prompt_for_confirmation: bool = True):
"""Deletes chemical from the library. If complete_deletion == False, only the specific name specified is deleted from \
the library. If true, then the entire chemical record (including all other names) is deleted."""
chemical_library = load_chemicals()
if chemical_name not in chemical_library:
error_messages.chemical_not_found(chemical_name)
chemical_object = chemical_library[chemical_name]
if complete_deletion:
names = chemical_object.names
if prompt_for_confirmation:
print("You are about to delete the following chemicals from your library:", *names)
user_input.confirm()
for name in names:
del (chemical_library[name])
else:
if prompt_for_confirmation:
print("You are about to delete '" + str(chemical_name) + "' from your chemical library.")
user_input.confirm()
chemical_object.names.remove(chemical_name)
del (chemical_library[chemical_name])
save_chemical_library(chemical_library)
print("Deletion successful.")
# --------------------------------------------------------------------------------
# ------------------------READING/WRITING TO CHEMICAL LIBRARY---------------------
# --------------------------------------------------------------------------------
def save_chemical_library(chemical_library: dict):
"""Saves chemical_library to file."""
unique_chemical_objects = []
for chemical_object in chemical_library.values():
if chemical_object not in unique_chemical_objects:
unique_chemical_objects.append(chemical_object)
with open(chemical_library_file, "w") as file:
for chemical_object in unique_chemical_objects:
file.write(str(chemical_object) + "\n")
def load_chemicals():
"""Loads chemical library from file."""
try:
with open(chemical_library_file, "r") as file:
chemical_lines = file.readlines()
chemicals = {}
for line in chemical_lines:
words = line.split()
molar_mass = words[0]
names = words[1:]
chemical = make_safe_chemical(molar_mass, names, chemical_library=chemicals)
for name in names:
chemicals[name] = chemical
return chemicals
except:
error_messages.library_load_error(lower_case_library_name="chemical")
def reset():
"""Wipes chemical library."""
with open(chemical_library_file, "w") as file:
pass
# --------------------------------------------------------------------------------
# -----------------------------DISPLAYING CHEMICALS-------------------------------
# --------------------------------------------------------------------------------
def display_chemical_information(chemical_name: str):
"""Displays the names and molar mass of a specified chemical."""
chemical_library = load_chemicals()
if chemical_name not in chemical_library:
error_messages.chemical_not_found(chemical_name)
chemical_object = chemical_library[chemical_name]
print("Chemical name: " + str(chemical_name))
other_names = [name for name in chemical_object.names if name != chemical_name]
print("Other names:", *other_names)
print("Molar mass: " + str(chemical_object.molar_mass))
def display_chemical_library():
"""Displays all chemicals in the library."""
chemical_library = load_chemicals()
print("The chemicals in your library are:")
table = []
for chemical_name, chemical_object in chemical_library.items():
table.append((chemical_name, chemical_object.molar_mass))
# Sorting by the chemical name, upper() is called so that all the upper case names don't precede all the lowercase ones.
table.sort(key=lambda entry: entry[0].upper())
print(tabulate.tabulate(table, headers=["Chemical Name", "Molar Mass (g/mol)"], tablefmt="fancy_grid"))
```
#### File: buf/buf/main.py
```python
from docopt import docopt
import sys
if __name__ == '__main__':
import commands
else:
import buf.commands as commands
# TODO: add buf reset
docstring = """
buf
Usage:
buf --version
buf help
buf help <subcommand_name>
buf chemical
buf chemical <chemical_name>
buf chemical -a <molar_mass> <chemical_names>...
buf chemical -a <file_name>
buf chemical -n <existing_chemical_name> <nicknames>...
buf chemical -d <chemical_name> [--complete] [--confirm]
buf recipe
buf recipe <recipe_name>
buf recipe -a <recipe_name> (<concentrations> <chemical_names>)...
buf recipe -a <file_name>
buf recipe -d <recipe_name> [--confirm]
buf make <volume> <recipe_name>
buf make <volume> (<concentrations> <chemical_names>)...
"""
def main():
"""Parses command line arguments, calling the correct modules/functions in turn.
If a module is found that matches a subcommand name, the function in the module that shares
the same name is called. For example, using the 'buf chemical <args>... [options]' subcommand
in turn calls buf.commands.chemical.chemical, passing in the dictionary of command line options
as a parameter."""
options = docopt(docstring, help=False, version="1.0.0")
for k, v in options.items():
if v:
if hasattr(commands, k):
module = getattr(commands, k)
func = getattr(module, k)
func(options)
def line(string):
"""Simulates a command line entry."""
sys.argv = string.split()
main()
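# Example sketch (illustrative values) of driving the CLI in-process via line(), using
# subcommands from the usage string above:
#   line("buf chemical -a 58.44 NaCl salt")
#   line("buf make 2L 1M NaCl")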
def reset():
"""Wipes the recipe and chemical libraries."""
commands.chemical.reset()
commands.recipe.reset()
```
#### File: buf/buf/unit.py
```python
from sys import exit
from buf import error_messages
class UnitInfo:
"""Record that stores a list of equivalent unit symbols, their scale factor (the factor one needs to multiply by
to reach a standard unit, for example the scale factor of mL relative to L is 1e-3), and pointers to the units that are immediately
larger and smaller than it (for example, the UnitInfo describing mL might point to L and uL)."""
def __init__(self, symbols, scale_factor):
self.symbols = symbols
self.scale_factor = scale_factor
self.greater = None
self.lesser = None
def __lt__(self, other):
return self.scale_factor < other.scale_factor
# NOTE: not checking self.lesser since then the method becomes self-referencing
def __eq__(self, other):
return set(self.symbols) == set(other.symbols) and self.scale_factor == other.scale_factor \
and self.greater == other.greater
class UnitLadder:
"""Stores a hierarchy of units of a certain type, such as units of volume. Allows one to easily scale/convert physical
quantities between units in the ladder.
Some notes on terminology in this class:
- "Scaling up" a physical quantity means making quantity's unit larger (for example, going from mL to L). This means
that the magnitude of the physical quantity gets SMALLER after scaling up. The reverse is true for scaling down."""
def __init__(self, unit_dict):
symbol_to_info = {}
# Have to make this list instead of using symbol_to_info.values() after the next loop, since duplicates will appear multiple times.
unit_info_list = []
for symbol, scale_factor in unit_dict.items():
found = False
for unit_info in symbol_to_info.values():
if unit_info.scale_factor == scale_factor:
unit_info.symbols.append(symbol)
symbol_to_info[symbol] = unit_info
found = True
break
if not found:
new_unit_info = UnitInfo([symbol], scale_factor)
unit_info_list.append(new_unit_info)
symbol_to_info[symbol] = new_unit_info
unit_info_list.sort()
for index in range(len(unit_info_list)-1):
first_info = unit_info_list[index]
second_info = unit_info_list[index+1]
first_info.greater = second_info
second_info.lesser = first_info
self.unit_info_list = unit_info_list
self.symbol_to_info = symbol_to_info
self.symbols = list(unit_dict.keys())
def __contains__(self, item):
return item in self.symbol_to_info
def get_symbols(self):
return list(self.symbol_to_info.keys())
def get_scale_factor(self, symbol):
"""Return the scale factor of a given unit."""
return self.symbol_to_info[symbol].scale_factor
def can_scale_up_unit(self, symbol):
"""Checks if a unit exists in the ladder that is greater than the symbol given (which determines whether the
ladder can scale a physical quantity with the given unit to a larger unit)."""
if symbol not in self.symbol_to_info:
error_messages.unit_not_in_ladder(symbol)
unit_info = self.symbol_to_info[symbol]
if unit_info.greater:
return True
else:
return False
def can_scale_down_unit(self, symbol):
"""Checks if a unit exists in the ladder that is lesser than the symbol given (which determines whether the
ladder can scale a physical quantity with the given unit to a smaller unit)."""
if symbol not in self.symbol_to_info:
error_messages.unit_not_in_ladder(symbol)
unit_info = self.symbol_to_info[symbol]
if unit_info.lesser:
return True
else:
return False
def scale_up_unit(self, symbol):
"""Given a unit, returns the next unit larger than it, as well as the factor one would have to multiply
the magnitude of a physical quantity by to convert to the new unit."""
if symbol not in self.symbol_to_info:
error_messages.unit_not_in_ladder(symbol)
unit_info = self.symbol_to_info[symbol]
if unit_info.greater:
return unit_info.greater.symbols[0], (unit_info.scale_factor / unit_info.greater.scale_factor)
else:
error_messages.no_greater_unit_in_ladder(symbol)
def scale_down_unit(self, symbol):
"""Given a unit, returns the next unit smaller than it, as well as the factor one would have to multiply
the magnitude of a physical quantity by to convert to the new unit."""
if symbol not in self.symbol_to_info:
error_messages.unit_not_in_ladder(symbol)
unit_info = self.symbol_to_info[symbol]
if unit_info.lesser:
return unit_info.lesser.symbols[0], (unit_info.scale_factor / unit_info.lesser.scale_factor)
else:
error_messages.no_lesser_unit_in_ladder(symbol)
# NOTE: This method does NOT do any type checking.
def split_unit_quantity(string):
"""Given a physical quantity as a string, returns a tuple containing the quantity's magnitude and unit, both
as strings."""
quantity = ""
index = 0
quantity_characters = [str(num) for num in range(10)] + [".", "-", "+"]
for character in string:
if character in quantity_characters:
quantity+= character
index += 1
else:
break
symbol = string[index:]
return quantity, symbol
def scale_up_physical_quantity(quantity: float, symbol: str):
"""Scales up a physical quantity (ie. unit gets larger, magnitude gets smaller) until the magnitude is in the
range [1, 1000) or there is no greater unit to scale to. For example, "10000mL" would be scaled up to "10L"."""
if symbol in volume_units:
ladder = volume_units
elif symbol in mass_units:
ladder = mass_units
elif symbol in concentration_units:
ladder = concentration_units
else:
error_messages.unit_not_in_any_ladder(symbol)
while quantity >= 1000 and ladder.can_scale_up_unit(symbol):
new_symbol, scale_factor = ladder.scale_up_unit(symbol)
quantity *= scale_factor
symbol = new_symbol
return quantity, symbol
def scale_down_physical_quantity(magnitude: float, symbol: str):
"""Scales down a physical quantity (ie. unit gets smaller, magnitude gets larger) until the magnitude is in the
range [1, 1000) or there is no lesser unit to scale to. For example, "0.1L" would be scaled down to "100mL"."""
if symbol in volume_units:
ladder = volume_units
elif symbol in mass_units:
ladder = mass_units
elif symbol in concentration_units:
ladder = concentration_units
else:
error_messages.unit_not_in_any_ladder(symbol)
while magnitude < 1 and ladder.can_scale_down_unit(symbol):
new_symbol, scale_factor = ladder.scale_down_unit(symbol)
magnitude *= scale_factor
symbol = new_symbol
return magnitude, symbol
def scale_and_round_physical_quantity(magnitude: float, symbol : str):
"""Scales a physical quantity up/down so that its magnitude is in the range [1,1000), before rounding the magnitude
and returning the magnitude combined with the unit as a string."""
if magnitude >= 1000:
magnitude, symbol = scale_up_physical_quantity(magnitude, symbol)
elif magnitude < 1:
magnitude, symbol = scale_down_physical_quantity(magnitude, symbol)
magnitude = round(magnitude, 2)
return str(magnitude) + symbol
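# Worked example (sketch, relies on the volume ladder defined below): 0.0025 L has a
# magnitude below 1, so it is scaled down one rung and rounded, i.e.
#   scale_and_round_physical_quantity(0.0025, "L")  ->  "2.5mL"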
# Standardised to litres.
volume_units = UnitLadder({"L" : 1, "mL" : 1e-3, "µL" : 1e-6, "uL": 1e-6})
# Standardised to grams.
mass_units = UnitLadder({"kg" : 1000, "g" : 1, "mg" : 1e-3, "µg": 1e-6, "ug" : 1e-6})
# Standardised to molar.
concentration_units = UnitLadder({"M" : 1, "mM" : 1e-3, "µM" : 1e-6, "uM" : 1e-6})
valid_units = volume_units.symbols + mass_units.symbols + concentration_units.symbols + ["%"]
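# Quick illustration (sketch) of one rung of the volume ladder defined above:
#   volume_units.scale_up_unit("mL")   ->  ("L", 0.001)
#   volume_units.scale_down_unit("L")  ->  ("mL", 1000.0)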
def volume_unit_to_litres(symbol):
"""Convenience function that returns the factor one must multiply to convert a physical quantity with the specified
unit of volume into litres."""
return volume_units.get_scale_factor(symbol)
def concentration_unit_to_molar(symbol):
"""Convenience function that returns the factor one must multiply to convert a physical quantity with the specified
unit of concentration into molar."""
return concentration_units.get_scale_factor(symbol)
def mass_unit_to_grams(symbol):
"""Convenience function that returns the factor one must multiply to convert a physical quantity with the specified
unit of mass into grams."""
return mass_units.get_scale_factor(symbol)
```
#### File: buf/tests/test_libraries.py
```python
from unittest import TestCase, mock
import unittest
from buf import libraries
import os
import sys
import tempfile
class TestMakeDir(TestCase):
"""Tests buf.libraries.make_library."""
def test_already_exists(self):
"""Tests that the function raises an error if the directory it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(IsADirectoryError):
libraries.make_library_dir()
def test_proper_directory_creation(self):
"""Tests that the function properly creates a directory if none exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.make_library_dir()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestEnsureLibraryDirExists(TestCase):
"""Tests buf.libraries.ensure_library_dir_exists."""
def test_existence_check(self):
"""Tests that the function checks whether library_dir exists."""
with mock.patch("buf.libraries.os.path.exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.ensure_library_dir_exists()
mock_check.assert_called_with(libraries.library_dir)
def test_directory_creation(self):
"""Tests that the function actually makes library_dir if it doesn't exist."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.ensure_library_dir_exists()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestAddLibraryFile(TestCase):
"""Tests buf.libraries.add_library_file."""
def test_library_dir_existence_check(self):
"""Tests that the function ensures that library_dir has already been created."""
with mock.patch("buf.libraries.ensure_library_dir_exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.add_library_file("file.txt")
mock_check.assert_called()
def test_file_already_exists_check(self):
"""Tests that the function raises an error if the file it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(FileExistsError):
libraries.add_library_file("file.txt")
def test_proper_file_creation(self):
"""Tests that the function properly creates a directory if none exists."""
test_file_name = "file.txt"
test_file_path = os.path.join(sys.prefix, libraries.library_dir, test_file_name)
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.ensure_library_dir_exists"):
with mock.patch("buf.libraries.open") as mock_open:
libraries.add_library_file(test_file_name)
mock_open.assert_called_with(test_file_path, "w")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jordanbertasso/rcds",
"score": 3
} |
#### File: rcds/project/config.py
```python
from itertools import tee
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Tuple, Union, cast
import jsonschema # type: ignore
from rcds import errors
from ..util import load_any
from ..util.jsonschema import DefaultValidatingDraft7Validator
config_schema_validator = DefaultValidatingDraft7Validator(
schema=load_any(Path(__file__).parent / "rcds.schema.yaml"),
format_checker=jsonschema.draft7_format_checker,
)
def parse_config(
config_file: Path,
) -> Iterable[Union[errors.ValidationError, Dict[str, Any]]]:
"""
Load and validate a config file, returning both the config and any
errors encountered.
:param pathlib.Path config_file: The challenge config to load
:returns: Iterable containing any errors (all instances of
:class:`rcds.errors.ValidationError`) and the parsed config. The config will
always be last.
"""
# root = config_file.parent
config = load_any(config_file)
schema_errors: Iterable[errors.SchemaValidationError] = (
errors.SchemaValidationError(str(e), e)
for e in config_schema_validator.iter_errors(config)
)
# Make a duplicate to check whether there are errors returned
schema_errors, schema_errors_dup = tee(schema_errors)
# This is the same test as used in Validator.is_valid
if next(schema_errors_dup, None) is not None:
yield from schema_errors
yield config
def check_config(
config_file: Path,
) -> Tuple[Optional[Dict[str, Any]], Optional[Iterable[errors.ValidationError]]]:
"""
Load and validate a config file, returning any errors encountered.
If the config file is valid, the tuple returned contains the loaded config as
the first element, and the second element is None. Otherwise, the second
element is an iterable of errors that occurred during validation
This method wraps :func:`parse_config`.
:param pathlib.Path config_file: The challenge config to load
"""
load_data = parse_config(config_file)
load_data, load_data_dup = tee(load_data)
first = next(load_data_dup)
if isinstance(first, errors.ValidationError):
validation_errors = cast(
Iterable[errors.ValidationError],
filter(lambda v: isinstance(v, errors.ValidationError), load_data),
)
return (None, validation_errors)
else:
return (first, None)
def load_config(config_file: Path) -> Dict[str, Any]:
"""
Loads a config file, or throw an exception if it is not valid
This method wraps :func:`check_config`, and throws the first error returned
if there are any errors.
:param pathlib.Path config_file: The challenge config to load
:returns: The loaded config
"""
config, errors = check_config(config_file)
if errors is not None:
raise next(iter(errors))
# errors is None
assert config is not None
return config
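# Minimal usage sketch (file name is illustrative): load a validated project config,
# letting the first ValidationError propagate if the file does not satisfy the schema.
#   cfg = load_config(Path("rcds.yaml"))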
```
#### File: rcds/project/project.py
```python
from pathlib import Path
from typing import Any, Dict, Optional
import docker # type: ignore
from jinja2 import Environment
from rcds.util import SUPPORTED_EXTENSIONS, find_files
from ..backend import BackendContainerRuntime, BackendScoreboard, load_backend_module
from ..challenge import Challenge, ChallengeLoader
from . import config
from .assets import AssetManager
class Project:
"""
An rCDS project; the context that all actions are done within
"""
root: Path
config: dict
challenges: Dict[Path, Challenge]
challenge_loader: ChallengeLoader
asset_manager: AssetManager
container_backend: Optional[BackendContainerRuntime] = None
scoreboard_backend: Optional[BackendScoreboard] = None
jinja_env: Environment
docker_client: Any
def __init__(
self, root: Path, docker_client: Optional[docker.client.DockerClient] = None
):
"""
Create a project
"""
root = root.resolve()
try:
cfg_file = find_files(
["rcds"], SUPPORTED_EXTENSIONS, path=root, recurse=False
)["rcds"]
except KeyError:
raise ValueError(f"No config file found at '{root}'")
self.root = root
self.config = config.load_config(cfg_file)
self.challenge_loader = ChallengeLoader(self)
self.challenges = dict()
self.asset_manager = AssetManager(self)
self.jinja_env = Environment(autoescape=False)
if docker_client is not None:
self.docker_client = docker_client
else:
self.docker_client = docker.from_env()
def load_all_challenges(self) -> None:
for ext in SUPPORTED_EXTENSIONS:
for chall_file in self.root.rglob(f"challenge.{ext}"):
path = chall_file.parent
self.challenges[
path.relative_to(self.root)
] = self.challenge_loader.load(path)
def get_challenge(self, relPath: Path) -> Challenge:
return self.challenges[relPath]
def load_backends(self) -> None:
for backend_config in self.config["backends"]:
backend_info = load_backend_module(backend_config["resolve"])
if self.scoreboard_backend is None and backend_info.HAS_SCOREBOARD:
self.scoreboard_backend = backend_info.get_scoreboard(
self, backend_config["options"]
)
if self.container_backend is None and backend_info.HAS_CONTAINER_RUNTIME:
self.container_backend = backend_info.get_container_runtime(
self, backend_config["options"]
)
# TODO: maybe don't reinitialize here?
self.challenge_loader = ChallengeLoader(self)
```
#### File: rcds/util/jsonschema.py
```python
from jsonschema import Draft7Validator, validators # type: ignore
# From
# https://python-jsonschema.readthedocs.io/en/stable/faq/#why-doesn-t-my-schema-s-default-property-set-the-default-on-my-instance # noqa: B950
def extend_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
yield from validate_properties(validator, properties, instance, schema)
return validators.extend(validator_class, {"properties": set_defaults})
DefaultValidatingDraft7Validator = extend_with_default(Draft7Validator)
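# Illustrative example (follows the FAQ recipe referenced above; the schema and instance
# are made up): validating fills schema defaults into the instance in place.
#   schema = {"properties": {"level": {"default": 1}}}
#   obj = {}
#   DefaultValidatingDraft7Validator(schema).validate(obj)
#   assert obj == {"level": 1}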
```
#### File: tests/challenge/test_docker.py
```python
from pathlib import Path
from typing import cast
import pytest # type: ignore
from rcds import ChallengeLoader, Project
from rcds.challenge import docker
class TestGetContextFiles:
def test_basic(self, datadir) -> None:
df_root = datadir / "contexts" / "basic"
assert df_root.is_dir()
got = {str(p.relative_to(df_root)) for p in docker.get_context_files(df_root)}
assert got == {
"Dockerfile",
"file",
"a/file",
"a/b/file",
".file",
"a/.file",
"a/b/.file",
}
def test_with_dockerignore(self, datadir: Path) -> None:
df_root = datadir / "contexts" / "dockerignore"
assert df_root.is_dir()
got = {str(p.relative_to(df_root)) for p in docker.get_context_files(df_root)}
assert got == {"Dockerfile", ".dockerignore", ".file", "file"}
def test_complex_dockerignore(self, datadir: Path) -> None:
df_root = datadir / "contexts" / "complex_dockerignore"
assert df_root.is_dir()
got = {str(p.relative_to(df_root)) for p in docker.get_context_files(df_root)}
assert got == {"a", "b", "c/file", "d/file"}
class TestGenerateSum:
def test_basic(self, datadir) -> None:
df_root = datadir / "contexts" / "basic"
assert df_root.is_dir()
# TODO: better way of testing than blackbox hash compare
assert (
docker.generate_sum(df_root)
== "683c5631d14165f0326ef55dfaf5463cc0aa550743398a4d8e31d37c4f5d6981"
)
class TestContainerManager:
@pytest.fixture()
def project(self, datadir: Path) -> Project:
return Project(datadir / "project")
def test_omnibus(self, project: Project) -> None:
challenge_loader = ChallengeLoader(project)
chall = challenge_loader.load(project.root / "chall")
container_mgr = docker.ContainerManager(chall)
simple_container = container_mgr.containers["simple"]
assert simple_container.name == "simple"
assert simple_container.IS_BUILDABLE
assert type(simple_container) == docker.BuildableContainer
simple_container = cast(docker.BuildableContainer, simple_container)
assert simple_container.get_full_tag().startswith("registry.com/ns/")
assert "simple" in simple_container.get_full_tag()
assert chall.config["containers"]["simple"]["image"].startswith(
"registry.com/ns/"
)
assert "simple" in chall.config["containers"]["simple"]["image"]
assert simple_container.dockerfile == "Dockerfile"
assert simple_container.buildargs == dict()
complex_container = container_mgr.containers["complex"]
assert complex_container.name == "complex"
assert complex_container.IS_BUILDABLE
assert type(complex_container) == docker.BuildableContainer
complex_container = cast(docker.BuildableContainer, complex_container)
assert complex_container.get_full_tag().startswith("registry.com/ns/")
assert "complex" in complex_container.get_full_tag()
assert chall.config["containers"]["complex"]["image"].startswith(
"registry.com/ns/"
)
assert "complex" in chall.config["containers"]["complex"]["image"]
assert complex_container.dockerfile == "Dockerfile.alternate"
assert complex_container.buildargs["foo"] == "bar"
pg_container = container_mgr.containers["postgres"]
assert pg_container.name == "postgres"
assert not pg_container.IS_BUILDABLE
assert type(pg_container) == docker.Container
assert pg_container.get_full_tag() == "postgres"
def test_multiple_chall_independence(self, project) -> None:
challenge_loader = ChallengeLoader(project)
chall1 = challenge_loader.load(project.root / "chall")
chall2 = challenge_loader.load(project.root / "chall2")
chall1_mgr = docker.ContainerManager(chall1)
chall2_mgr = docker.ContainerManager(chall2)
assert "chall2ctr" not in chall1_mgr.containers
assert "postgres" not in chall2_mgr.containers
``` |
{
"source": "JordanBlocher/L96",
"score": 2
} |
#### File: JordanBlocher/L96/L96animate.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from L96 import L96
nspinup = 1000 # time steps to spin up truth run
nmax = 10000 # number of ob times.
np.random.seed(42) # fix random seed for reproducibility
F = 8; deltaF = 1./8.; Fcorr = np.exp(-1)**(1./3.) # efolding over n timesteps, n=3
model = L96(n=80,F=F,deltaF=deltaF,Fcorr=Fcorr,diff_max=2.5,diff_min=0.5) # model instance for truth run
for nt in range(nspinup): # spinup truth run
model.advance()
uu = []; tt = []
N = model.n
x = np.arange(N)
fig, ax = plt.subplots()
line, = ax.plot(x, model.x.squeeze())
ax.set_xlim(0,N-1)
#ax.set_ylim(3,3)
#Init only required for blitting to give a clean slate.
def init():
global line
line.set_ydata(np.ma.array(x, mask=True))
return line,
def updatefig(n):
global tt,uu,vspec
model.advance()
u = model.x.squeeze()
line.set_ydata(u)
print(n, u.min(), u.max())
uu.append(u); tt.append(n*model.dt)
return line,
#Writer = animation.writers['ffmpeg']
#writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
ani = animation.FuncAnimation(fig, updatefig, np.arange(1,nmax+1), init_func=init,
interval=25, blit=True, repeat=False)
#ani.save('KS.mp4',writer=writer)
plt.show()
plt.figure()
# make contour plot of solution, plot spectrum.
ncount = len(uu)
uu = np.array(uu); tt = np.array(tt)
print(tt.min(), tt.max())
print(uu.shape)
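# form anomalies about the time mean, then estimate the climatological sample covariance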
uup = uu - uu.mean(axis=0)
print(uup.shape)
cov = np.dot(uup.T,uup)/(ncount-1)
print('cov', cov.min(), cov.max(), cov.shape)
nplt = 500
print(uu[:nplt].min(), uu[:nplt].max())
plt.contourf(x,tt[:nplt],uu[:nplt],np.linspace(-18,18,41),cmap=plt.cm.bwr,extend='both')
plt.xlabel('x')
plt.ylabel('t')
plt.colorbar()
plt.title('time-longitude snapshot of modified L96 model')
plt.savefig('hovmuller.png')
plt.figure()
print(cov.min(), cov.max())
plt.pcolormesh(x,x,cov,cmap=plt.cm.bwr,vmin=-30,vmax=30)
plt.title('climatological covariance matrix for modified L96 model')
plt.xlabel('grid index')
plt.ylabel('grid index')
plt.colorbar()
plt.savefig('covmat.png')
plt.show()
``` |
{
"source": "jordanbroberts/python-myBlog",
"score": 2
} |
#### File: python-myBlog/blog/views.py
```python
from django.shortcuts import render, get_object_or_404
from .models import Post
from taggit.models import Tag
def home(request):
return render(request, 'home.html')
def index(request, tag_slug=None ):
allpost = Post.objects.all()
tag = None
if 'category' in request.GET:
allpost = allpost.filter(category=int(request.GET.get('category')))
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
allpost = allpost.filter(tags__in=[tag])
return render(request,'index.html',{'posts': allpost.order_by('-created_on'), 'tag':tag })
def view_post(request, pk):
post = Post.objects.get(pk=pk)
return render(request, 'post.html', {'post': post})
``` |
{
"source": "jordanbroberts/python-Snake",
"score": 3
} |
#### File: jordanbroberts/python-Snake/PythonSnake.py
```python
import pygame, sys
from pygame.locals import *
import time
import random
pygame.init()
# set up the colors
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255,255,0)
# required globals
window_width = 400
window_height = 400
# Frames per seconds
FPS = 15
# setting the display size
DISPLAYSURF = pygame.display.set_mode((window_width, window_height))
# setting the caption
pygame.display.set_caption('Snake')
# loading the snake image
img = pygame.image.load('snake.png')
def game_startup_screen():
intro = True
while intro:
# get which key is pressed
for event in pygame.event.get():
# if the X(close button) in the menu pane is pressed
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
# if Q is pressed
if event.key == pygame.K_q:
pygame.quit()
quit()
# if C is pressed
if event.key == pygame.K_c:
intro = False
# fill the whole display with white color
DISPLAYSURF.fill(WHITE)
display_message_on_screen('Snake', GREEN, -100)
display_message_on_screen('Press C to Play and Q to Quit', BLACK, -70)
pygame.display.update()
def pause():
pause = True
while pause:
# get which key is pressed
for event in pygame.event.get():
# if the X(close button) in the menu pane is pressed
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
# if Q is pressed
if event.key == pygame.K_q:
pygame.quit()
quit()
# if C is pressed
if event.key == pygame.K_c:
pause = False
# fill the whole display with white color
DISPLAYSURF.fill(WHITE)
display_message_on_screen('Paused', BLUE, -100)
display_message_on_screen('Press C to Play and Q to Quit', BLUE, -70)
pygame.display.update()
def display_message_on_screen(message, color, y_displace = 0):
# set the font
font = pygame.font.SysFont('sanserif', 30)
text = font.render(message, True, color)
textRect = text.get_rect()
# write the text in center of the window
textRect.center = (window_width // 2), (window_height // 2) + y_displace
DISPLAYSURF.blit(text, textRect)
def snake(coord_list, global_coord_offset):
# check the direction of snake's head to rotate accordingly
if direction == 'right':
head = pygame.transform.rotate(img, 270)
if direction == 'left':
head = pygame.transform.rotate(img, 90)
if direction == 'up':
head = img
if direction == 'down':
head = pygame.transform.rotate(img, 180)
# display head
DISPLAYSURF.blit(head, (coord_list[-1][0], coord_list[-1][1]))
# display body
for coord in coord_list[:-1]:
pygame.draw.rect(DISPLAYSURF, BLACK, [coord[0], coord[1], global_coord_offset, global_coord_offset])
def score(score):
# set the font
font = pygame.font.SysFont('sanserif', 30)
text = font.render('Score: ' + str(score), True, BLACK )
DISPLAYSURF.blit(text, [160,0])
def start_game():
# required variables
global direction
# snake's head will point at right direction on each startup
direction = 'right'
# get the center of the screen
x_coord = window_width // 2
y_coord = window_height // 2
# declaring offset to move the snake
x_coord_offset = 0
y_coord_offset = 0
# declaring the number of pixels snake will move on each move
global_coord_offset = 10
# for setting frames per sec
clock = pygame.time.Clock()
# for storing the snake's body
coord_list = []
snakeLength = 1
# snake's food coordinates
food_x_coord = round(random.randrange(0, window_width - global_coord_offset) // 10.0) * 10.0
food_y_coord = round(random.randrange(0, window_height - global_coord_offset) // 10.0) * 10.0
exit = False
game_over = False
while not exit: # main game loop
DISPLAYSURF.fill(WHITE)
# this loop will execute when game is over
while game_over == True:
display_message_on_screen('GAME OVER', RED)
display_message_on_screen('Press C to Continue and Q to Quit', BLACK, 50)
pygame.display.update()
# get which key is pressed
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit = True
game_over = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
exit = True
game_over = False
if event.key == pygame.K_c:
start_game()
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit = True
# if key is pressed
if event.type == pygame.KEYDOWN:
# if left arrow key is pressed move to left
if event.key == pygame.K_LEFT:
x_coord_offset = -global_coord_offset
y_coord_offset = 0
direction = 'left'
# if right arrow key is pressed move to right
elif event.key == pygame.K_RIGHT:
x_coord_offset = global_coord_offset
y_coord_offset = 0
direction = 'right'
# if right arrow key is pressed move to right
elif event.key == pygame.K_UP:
y_coord_offset = -global_coord_offset
x_coord_offset = 0
direction = 'up'
# if right arrow key is pressed move to right
elif event.key == pygame.K_DOWN:
y_coord_offset = global_coord_offset
x_coord_offset = 0
direction = 'down'
# to pause the game
elif event.key == pygame.K_p:
pause()
# defining boundaries
if abs(x_coord) >= window_width or x_coord < 0 or abs(y_coord) >= window_height or y_coord < 0:
game_over = True
# move snake with specified offset
x_coord += x_coord_offset
y_coord += y_coord_offset
# pygame.draw.rect(where_do_you_wanna_draw, color, [x_coord, y_coord, width, height])
pygame.draw.rect(DISPLAYSURF, YELLOW, [food_x_coord, food_y_coord, global_coord_offset, global_coord_offset])
coord_head = []
coord_head.append(x_coord)
coord_head.append(y_coord)
coord_list.append(coord_head)
if len(coord_list) > snakeLength:
del coord_list[0]
# check if snake touches it's own body
for current_coord in coord_list[:-1]:
if current_coord == coord_head:
game_over = True
# draw the snake and score on screen
snake(coord_list, global_coord_offset)
score(snakeLength - 1)
pygame.display.update()
# if snake eats the food
if x_coord == food_x_coord and y_coord == food_y_coord:
food_x_coord = round(random.randrange(0, window_width - global_coord_offset) // 10.0) * 10.0
food_y_coord = round(random.randrange(0, window_height - global_coord_offset) // 10.0) * 10.0
snakeLength += 1
# regulating the game speed
clock.tick(FPS)
time.sleep(2)
pygame.quit()
quit()
game_startup_screen()
start_game()
``` |
{
"source": "jordanbull23/Icourses-Videos-and-PPTs-Download",
"score": 3
} |
#### File: Icourses-Videos-and-PPTs-Download/src/change_name.py
```python
def change_name_windows(name_dict, loc):
with open(loc + '\\change_name.cmd', 'w') as f:
loc = loc.replace('\\', r'\\') + r'\\'
i = 0
for key in name_dict:
i = i+1
old_name = str(key).split(r'/')[-1]
tailor = old_name.split('.')[-1]
new_name = str(i) + '-' + name_dict[key] + '.' + tailor
if(r'/' in new_name):
new_name = new_name.replace(r'/', ' ')
try:
f.write(r'ren "%s" "%s"&' % (old_name, new_name))
f.write('\n')
except:
continue
finally:
pass
f.close()
def change_name_linux(name_dict, loc):
with open(loc + '/change_name.sh', 'w') as f:
i = 0
for key in name_dict:
i = i+1
old_name = str(key).split(r'/')[-1]
tailor = old_name.split('.')[-1]
new_name = str(i) + '-' + name_dict[key] + '.' + tailor
if(r'/' in new_name):
new_name = new_name.replace(r'/', ' ')
try:
f.write(r'mv "%s" "%s"' % (old_name, new_name))
f.write('\n')
except:
continue
finally:
pass
f.close()
def get_name_dict(mp4_list, pdf_list, source_list, homework_list, exampaper_list):
name_dict = {}
for key in mp4_list:
name_dict[(key.split('/')[-1])] = mp4_list[key]
for key in pdf_list:
name_dict[(key.split('/')[-1])] = pdf_list[key]
for key in source_list:
name_dict[(key.split('/')[-1])] = source_list[key]
for key in homework_list:
name_dict[(key.split('/')[-1])] = homework_list[key]
for key in exampaper_list:
name_dict[(key.split('/')[-1])] = exampaper_list[key]
return name_dict
def change_name(mp4_list, pdf_list, source_list, homework_list, exampaper_list, loc, mode):
name_dict = get_name_dict(
mp4_list, pdf_list, source_list, homework_list, exampaper_list)
if(mode == 0):
change_name_windows(name_dict, loc)
if(mode == 1):
change_name_linux(name_dict, loc)
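# Hypothetical usage sketch (the URLs, titles and download directory below are invented):
#   mp4_list = {'http://example.com/res/a1b2c3.mp4': 'Lecture 1 - Introduction'}
#   pdf_list = {'http://example.com/res/d4e5f6.pdf': 'Slides 1'}
#   change_name(mp4_list, pdf_list, {}, {}, {}, loc=r'C:\downloads\course', mode=0)  # mode=0 -> Windows .cmd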
```
#### File: Icourses-Videos-and-PPTs-Download/src/get_res_old.py
```python
from bs4 import BeautifulSoup
import requests
def getRess1(html):
soup = BeautifulSoup(html.text, 'lxml')
datasid = []
for link in soup.find_all(
'li', class_='chapter-bind-click panel noContent'):
sec_id = link.get('data-id')
datasid.append(sec_id)
return datasid
def getRess2(html):
soup = BeautifulSoup(html.text, 'lxml')
datasid = []
for link in soup.find_all(
'a', class_='chapter-body-content-text section-event-t no-load'):
sec_id = link.get('data-secid')
datasid.append(sec_id)
return datasid
def get_source_link(html):
source_list = {}
soup = BeautifulSoup(html.text, 'lxml')
for link in soup.find_all(
'a', class_='courseshareresources-content clearfix'):
source_list[link.get('data-url')] = '其他资源-' + link.get('data-title')
return source_list
def get_homework_and_exampaper_link(html, name):
source_list = {}
soup = BeautifulSoup(html.text, 'lxml')
for link in soup.find_all('a', {'data-class': 'media'}):
source_list[link.get(
'data-url')] = str(name) + '-' + link.get('data-title')
return source_list
def get_download_link(datasid, id):
mp4_list = {}
pdf_list = {}
header = {
'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie':
'JSESSIONID=14D2C920A7C2D8F83C4321EDAD8AC3CF-n1; Hm_lvt_787dbcb72bb32d4789a985fd6cd53a46=1528179131,1528309476,1528342756; hepUserSsoServerSSOServerTokenID=<PASSWORD>; H<PASSWORD>_<PASSWORD>d4789a985fd6cd53a46=1528342771',
'Host':
'www.icourses.cn',
'Origin':
'http://www.icourses.cn',
'Referer':
'http://www.icourses.cn/web/sword/portal/shareDetails?cId=' + str(id),
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
}
url = 'http://www.icourses.cn/web//sword/portal/getRess'
for i in datasid:
payload = {'sectionId': i}
result = requests.post(url, params=payload, headers=header)
json = result.json()
if len(json['model']['listRes']) != 0:
loc = json['model']['listRes']
for i in loc:
if i['mediaType'] == 'mp4':
if 'fullResUrl' in i:
mp4_list[i['fullResUrl']] = i['resSortDesc'] + \
'-' + i['title']
elif i['mediaType'] in ['ppt', 'pdf']:
if 'fullResUrl' in i:
pdf_list[i['fullResUrl']] = i['resSortDesc'] + \
'-' + i['title']
return mp4_list, pdf_list
``` |
{
"source": "jordancaraballo/nga-deep-learning",
"score": 2
} |
#### File: scripts/core/utils.py
```python
import gc # clean garbage collection
import glob # get global files from directory
import random # for random integers
from tqdm import tqdm # for progress bar
import numpy as np # for arrays modifications
import cupy as cp # for arrays modifications
import tensorflow as tf # deep learning framework
import scipy.signal # for postprocessing
import math # for math calculations
import rasterio as rio # read rasters
# Has a bug and will be included when bug is fixed.
# from cuml.dask.preprocessing import OneHotEncoder, LabelBinarizer
# For generating one-hot encoder labels
from datetime import datetime
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.callbacks import TensorBoard, CSVLogger
# --------------------------------------------------------------------------
# Preprocessing Functions
# --------------------------------------------------------------------------
def image_normalize(img, axis=(0, 1), c=1e-8):
"""
Normalize to zero mean and unit standard deviation along the given axis.
Args:
img (numpy or cupy): array (w, h, c)
axis (integer tuple): int or tuple of width and height axes
c (float): epsilon to bound given std value
Return:
Normalize single image
----------
Example
----------
image_normalize(arr, axis=(0, 1), c=1e-8)
"""
return (img - img.mean(axis)) / (img.std(axis) + c)
def batch_normalize(batch, axis=(0, 1), c=1e-8):
"""
Normalize batch to zero mean and unit standard deviation.
Args:
batch (numpy or cupy): array (n, w, h, c)
axis (integer tuple): int or tuple of width and height axes
c (float): epsilon to bound given std value
Return:
Normalize batch of images.
----------
Example
----------
batch_normalize(arr, axis=(0, 1), c=1e-8)
"""
# Note: for loop was proven to be faster than map method
for b in range(batch.shape[0]):
batch[b, :, :, :] = image_normalize(batch[b, :, :, :], axis=axis, c=c)
return batch
def gen_data_npz(fimg, img, mask, config, ntiles=1000, save_dir='train'):
"""
Extract random patches from cupy arrays.
Args:
fimg (str): data filename
img (cupy.array): cupy array with data
mask (cupy.array): cupy array with mask
save_dir (str): directory to save output
Return:
save dataset to save_dir.
----------
Example
----------
gen_data_npz('image.tif', arr, mask, config, 8000, 'output')
"""
# set dimensions of the input image array, and get desired tile size
z_dim, x_dim, y_dim = img.shape
tsz = config.TILE_SIZE
# placeholders for final datasets
img_cp = cp.empty((ntiles, tsz, tsz, z_dim), dtype=cp.float32)
mask_np = np.empty((ntiles, tsz, tsz, config.N_CLASSES), dtype=np.float16)
# generate n number of tiles
for i in tqdm(range(ntiles)):
# Generate random integers from image
xc = random.randint(0, x_dim - tsz)
yc = random.randint(0, y_dim - tsz)
# verify data is not on nodata region
while cp.any(
img[:, xc:(xc + tsz), yc:(yc + tsz)] == config.NODATA_VAL
):
xc = random.randint(0, x_dim - tsz)
yc = random.randint(0, y_dim - tsz)
# change order to (h, w, c)
tile_img = cp.moveaxis(
img[:, xc:(xc + tsz), yc:(yc + tsz)], 0, -1
)
# TODO: replace with cuml One-hot encoder on future date when they fix
# a bug on the output types. Using to_categorical in the meantime
# Converts labels into one-hot encoding labels
tile_mask = to_categorical(
cp.asnumpy(mask[xc:(xc + tsz), yc:(yc + tsz)]),
num_classes=config.N_CLASSES, dtype='float16'
)
# maybe standardize here? depends on performance of single img vs batch
img_cp[i, :, :, :] = tile_img
mask_np[i, :, :, :] = tile_mask
# normalize
if config.NORMALIZE:
img_cp = img_cp / config.normalization_factor
# standardize
if config.STANDARDIZE:
img_cp = batch_normalize(img_cp, axis=(0, 1), c=1e-8)
# save dataset into local disk, npz format with x and y labels
cp.savez(f'{save_dir}/{fimg[:-4]}.npz', x=img_cp, y=cp.asarray(mask_np))
# --------------------------------------------------------------------------
# Training Functions
# --------------------------------------------------------------------------
def get_tensorslices(data_dir='', img_id='x', label_id='y'):
"""
Getting tensor slices from disk.
Args:
data_dir (str): directory where data resides
img_id (str): object id from npz file to get data from
label_id (str): object id from npz file to get labels from
Return:
get image and label datasets
----------
Example
----------
get_tensorslices(data_dir='images', img_id='x', label_id='y')
"""
# open files and generate training dataset
images = np.array([])
labels = np.array([])
# read all data files from disk
for f in glob.glob(f'{data_dir}/*'):
with np.load(f) as data:
# vstack image batches into memory
if images.size: # if images has elements, vstack new batch
images = np.vstack([images, data[img_id]])
else: # if images empty, images equals new batch
images = data[img_id]
# vstack label batches into memory
if labels.size: # if labels has elements, vstack new batch
labels = np.vstack([labels, data[label_id]])
else: # if labels empty, images equals new batch
labels = data[label_id]
return images, labels
def data_augment(image, label):
"""
Augment data for semantic segmentation.
Args:
image (numpy.array): image numpy array
label (numpy.array): image numpy array
Return:
augmented image and label
----------
Example
----------
data_augment(image, label)
"""
# Thanks to the dataset.prefetch(AUTO) statement in the next function
# (below), this happens essentially for free on TPU. Data pipeline code
# is executed on the CPU part of the TPU, TPU is computing gradients.
randint = np.random.randint(1, 7)
if randint == 1: # flip left and right
image = tf.image.random_flip_left_right(image)
label = tf.image.random_flip_left_right(label)
elif randint == 2: # reverse second dimension
image = tf.image.random_flip_up_down(image)
label = tf.image.random_flip_up_down(label)
elif randint == 3: # rotate 90 degrees
image = tf.image.rot90(image, k=1)
label = tf.image.rot90(label, k=1)
elif randint == 4: # rotate 180 degrees
image = tf.image.rot90(image, k=2)
label = tf.image.rot90(label, k=2)
elif randint == 5: # rotate 270 degrees
image = tf.image.rot90(image, k=3)
label = tf.image.rot90(label, k=3)
return image, label
def get_training_dataset(dataset, config, do_aug=False, drop_remainder=False):
"""
Return training dataset to feed tf.fit.
Args:
dataset (tf.dataset): tensorflow dataset
config (Config): Config object with parameters
do_aug (bool): perform augmentation on the fly?
drop_remainder (bool): drop the remainder when the last batch does not match the batch size
Return:
tf dataset for training
----------
Example
----------
get_tensorslices(data_dir='images', img_id='x', label_id='y')
"""
dataset = dataset.map(data_augment, num_parallel_calls=config.AUTOTUNE)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(config.BATCH_SIZE, drop_remainder=drop_remainder)
# prefetch next batch while training (autotune prefetch buffer size)
dataset = dataset.prefetch(config.AUTOTUNE)
return dataset
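# Sketch of how the two helpers above are typically wired together
# (the 'train' directory name is an assumption):
#   images, labels = get_tensorslices(data_dir='train', img_id='x', label_id='y')
#   dataset = tf.data.Dataset.from_tensor_slices((images, labels))
#   train_ds = get_training_dataset(dataset, config, do_aug=True)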
def gen_callbacks(config, metadata):
"""
Generate tensorflow callbacks.
Args:
config (Config): object with configurations
metadata (dict): directory with callback metadata values
Return:
list of callback functions
----------
Example
----------
gen_callbacks(config, metadata)
"""
callback_list = list()
if 'TensorBoard' in config.CALLBACKS:
# Generating tensorboard callbacks
tensor = TensorBoard(
log_dir=config.MODEL_SAVEDIR, write_graph=True,
histogram_freq=metadata['history_freq']
)
callback_list.append(tensor)
if 'CSVLogger' in config.CALLBACKS:
# initialize model csv logger callback
csv_outfile = config.MODEL_OUTPUT_NAME[:-3] + '_' + \
datetime.now().strftime("%Y%m%d-%H%M%S")+'.csv'
csvlog = CSVLogger(csv_outfile, append=True, separator=';')
callback_list.append(csvlog)
if 'EarlyStopping' in config.CALLBACKS:
# initialize model early stopping callback
early_stop = EarlyStopping(
patience=metadata['patience_earlystop'],
monitor=metadata['monitor_earlystop']
)
callback_list.append(early_stop)
if 'ModelCheckpoint' in config.CALLBACKS:
# initialize model checkpoint callback
checkpoint = ModelCheckpoint(
filepath=config.MODEL_OUTPUT_NAME[:-3]+'_{epoch:02d}.h5',
monitor=metadata['monitor_checkpoint'],
save_best_only=metadata['save_best_only'],
save_freq=metadata['save_freq'],
verbose=1
)
callback_list.append(checkpoint)
return callback_list
# --------------------------------------------------------------------------
# Prediction Functions
# --------------------------------------------------------------------------
def pad_image(img, target_size):
"""
Pad an image up to the target size.
Args:
img (numpy.arry): image array
target_size (int): image target size
Return:
padded image array
----------
Example
----------
pad_image(img, target_size=256)
"""
rows_missing = target_size - img.shape[0]
cols_missing = target_size - img.shape[1]
padded_img = np.pad(
img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant'
)
return padded_img
def predict_windowing(x, model, config, spline):
"""
Predict scene using windowing mechanisms.
Args:
x (numpy.array): image array
model (tf h5): image target size
config (Config):
spline (numpy.array):
Return:
prediction scene array probabilities
----------
Example
----------
predict_windowing(x, model, config, spline)
"""
print("Entering windowing prediction", x.shape)
img_height = x.shape[0]
img_width = x.shape[1]
n_channels = x.shape[2]
# make extended img so that it contains integer number of patches
npatches_vertical = math.ceil(img_height / config.TILE_SIZE)
npatches_horizontal = math.ceil(img_width / config.TILE_SIZE)
extended_height = config.TILE_SIZE * npatches_vertical
extended_width = config.TILE_SIZE * npatches_horizontal
ext_x = np.zeros(
shape=(extended_height, extended_width, n_channels), dtype=np.float32
)
# fill extended image with mirrors:
ext_x[:img_height, :img_width, :] = x
for i in range(img_height, extended_height):
ext_x[i, :, :] = ext_x[2 * img_height - i - 1, :, :]
for j in range(img_width, extended_width):
ext_x[:, j, :] = ext_x[:, 2 * img_width - j - 1, :]
# now we assemble all patches in one array
patches_list = [] # do vstack later instead of list
for i in range(0, npatches_vertical):
for j in range(0, npatches_horizontal):
x0, x1 = i * config.TILE_SIZE, (i + 1) * config.TILE_SIZE
y0, y1 = j * config.TILE_SIZE, (j + 1) * config.TILE_SIZE
patches_list.append(ext_x[x0:x1, y0:y1, :])
patches_array = np.asarray(patches_list)
# standardize
if config.STANDARDIZE:
patches_array = batch_normalize(patches_array, axis=(0, 1), c=1e-8)
# predictions:
patches_predict = \
model.predict(patches_array, batch_size=config.PRED_BATCH_SIZE)
prediction = np.zeros(
shape=(extended_height, extended_width, config.N_CLASSES),
dtype=np.float32
)
# ensemble of patches probabilities
for k in range(patches_predict.shape[0]):
i = k // npatches_horizontal
j = k % npatches_horizontal
x0, x1 = i * config.TILE_SIZE, (i + 1) * config.TILE_SIZE
y0, y1 = j * config.TILE_SIZE, (j + 1) * config.TILE_SIZE
prediction[x0:x1, y0:y1, :] = patches_predict[k, :, :, :] * spline
return prediction[:img_height, :img_width, :]
def predict_sliding(x, model, config, spline):
"""
Predict scene using sliding windows.
Args:
x (numpy.array): image array
model (tf h5): image target size
config (Config):
spline (numpy.array):
Return:
prediction scene array probabilities
----------
Example
----------
predict_windowing(x, model, config, spline)
"""
stride = math.ceil(config.TILE_SIZE * (1 - config.PRED_OVERLAP))
tile_rows = max(
int(math.ceil((x.shape[0] - config.TILE_SIZE) / stride) + 1), 1
) # strided convolution formula
tile_cols = max(
int(math.ceil((x.shape[1] - config.TILE_SIZE) / stride) + 1), 1
) # strided convolution formula
print(f'{tile_cols} x {tile_rows} prediction tiles @ stride {stride} px')
full_probs = np.zeros((x.shape[0], x.shape[1], config.N_CLASSES))
count_predictions = \
np.zeros((x.shape[0], x.shape[1], config.N_CLASSES))
tile_counter = 0
for row in range(tile_rows):
for col in range(tile_cols):
x1 = int(col * stride)
y1 = int(row * stride)
x2 = min(x1 + config.TILE_SIZE, x.shape[1])
y2 = min(y1 + config.TILE_SIZE, x.shape[0])
x1 = max(int(x2 - config.TILE_SIZE), 0)
y1 = max(int(y2 - config.TILE_SIZE), 0)
img = x[y1:y2, x1:x2]
padded_img = pad_image(img, config.TILE_SIZE)
tile_counter += 1
padded_img = np.expand_dims(padded_img, 0)
# standardize
if config.STANDARDIZE:
padded_img = batch_normalize(padded_img, axis=(0, 1), c=1e-8)
imgn = padded_img
imgn = imgn.astype('float32')
padded_prediction = model.predict(imgn)[0]
prediction = padded_prediction[0:img.shape[0], 0:img.shape[1], :]
count_predictions[y1:y2, x1:x2] += 1
full_probs[y1:y2, x1:x2] += prediction * spline
# average the predictions in the overlapping regions
full_probs /= count_predictions
return full_probs
def predict_all(x, model, config, spline):
"""
Predict full scene using average predictions.
Args:
x (numpy.array): image array
model (tf h5): image target size
config (Config):
spline (numpy.array):
Return:
prediction scene array average probabilities
----------
Example
----------
predict_all(x, model, config, spline)
"""
for i in range(8):
if i == 0: # reverse first dimension
x_seg = predict_windowing(
x[::-1, :, :], model, config, spline=spline
).transpose([2, 0, 1])
elif i == 1: # reverse second dimension
temp = predict_windowing(
x[:, ::-1, :], model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp[:, ::-1, :] + x_seg
elif i == 2: # transpose(interchange) first and second dimensions
temp = predict_windowing(
x.transpose([1, 0, 2]), model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp.transpose(0, 2, 1) + x_seg
gc.collect()
elif i == 3:
temp = predict_windowing(
np.rot90(x, 1), model, config, spline=spline
)
x_seg = np.rot90(temp, -1).transpose([2, 0, 1]) + x_seg
gc.collect()
elif i == 4:
temp = predict_windowing(
np.rot90(x, 2), model, config, spline=spline
)
x_seg = np.rot90(temp, -2).transpose([2, 0, 1]) + x_seg
elif i == 5:
temp = predict_windowing(
np.rot90(x, 3), model, config, spline=spline
)
x_seg = np.rot90(temp, -3).transpose(2, 0, 1) + x_seg
elif i == 6:
temp = predict_windowing(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
elif i == 7:
temp = predict_sliding(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
gc.collect()
del x, temp # delete arrays
x_seg /= 8.0
return x_seg.argmax(axis=0)
def predict_sliding_probs(x, model, config):
"""
Predict full scene using average predictions.
Args:
x (numpy.array): image array
model (tf h5): image target size
config (Config):
Return:
prediction scene array average probabilities
----------
Example
----------
predict_sliding_probs(x, model, config, spline)
"""
# initial size: original tile (512, 512) - ((self.config.tile_size, ) * 2)
stride = config.TILE_SIZE - config.PRED_OVERLAP
shift = int((config.TILE_SIZE - stride) / 2)
print(f'Stride and shift: {stride}, {shift}')
height, width, num_channels = x.shape
if height % stride == 0:
num_h_tiles = int(height / stride)
else:
num_h_tiles = int(height / stride) + 1
if width % stride == 0:
num_w_tiles = int(width / stride)
else:
num_w_tiles = int(width / stride) + 1
rounded_height = num_h_tiles * stride
rounded_width = num_w_tiles * stride
padded_height = rounded_height + 2 * shift
padded_width = rounded_width + 2 * shift
padded = np.zeros((padded_height, padded_width, num_channels))
padded[shift:shift + height, shift: shift + width, :] = x
up = padded[shift:2 * shift, shift:-shift, :][:, ::-1]
padded[:shift, shift:-shift, :] = up
lag = padded.shape[0] - height - shift
bottom = padded[height + shift - lag:shift + height, shift:-shift, :][:, ::-1]
padded[height + shift:, shift:-shift, :] = bottom
left = padded[:, shift:2 * shift, :][:, :, ::-1]
padded[:, :shift, :] = left
lag = padded.shape[1] - width - shift
right = padded[:, width + shift - lag:shift + width, :][:, :, ::-1]
padded[:, width + shift:, :] = right
h_start = range(0, padded_height, stride)[:-1]
assert len(h_start) == num_h_tiles
w_start = range(0, padded_width, stride)[:-1]
assert len(w_start) == num_w_tiles
# get tiles out of the imagery
temp = []
for h in h_start:
for w in w_start:
temp += [padded[h:h + config.TILE_SIZE, w:w + config.TILE_SIZE, :]]
prediction = np.array(temp) # convert to numpy array
# standardize
if config.STANDARDIZE:
prediction = batch_normalize(prediction, axis=(0, 1), c=1e-8)
prediction = model.predict(prediction)
# iterate over given predictions
predicted_mask = np.zeros((rounded_height, rounded_width, config.N_CLASSES))
for j_h, h in enumerate(h_start):
for j_w, w in enumerate(w_start):
i = len(w_start) * j_h + j_w
predicted_mask[h: h + stride, w: w + stride, :] = \
prediction[i][shift:shift + stride, shift:shift + stride, :]
return predicted_mask[:height, :width, :]
def pred_mask(pr, threshold=0.50):
'''Predicted mask according to threshold'''
pr_cp = np.copy(pr)
pr_cp[pr_cp < threshold] = 0
pr_cp[pr_cp >= threshold] = 1
return pr_cp
def _2d_spline(window_size=128, power=2) -> np.array:
"""
Window method for boundaries/edge artifacts smoothing.
Args:
window_size (int): size of window/tile to smooth
power (int): spline polynomial power to use
Return:
smoothing distribution numpy array
----------
Example
----------
_2d_spline(window_size=128, power=2)
"""
intersection = int(window_size/4)
tria = scipy.signal.triang(window_size)
wind_outer = (abs(2*(tria)) ** power)/2
wind_outer[intersection:-intersection] = 0
wind_inner = 1 - (abs(2*(tria - 1)) ** power)/2
wind_inner[:intersection] = 0
wind_inner[-intersection:] = 0
wind = wind_inner + wind_outer
wind = wind / np.average(wind)
wind = np.expand_dims(np.expand_dims(wind, 1), 2)
wind = wind * wind.transpose(1, 0, 2)
return wind
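# Sketch of how the smoothing spline, windowed prediction and GeoTIFF export
# fit together (file names, raster array and model object are assumptions):
#   spline = _2d_spline(window_size=config.TILE_SIZE, power=2)
#   probs = predict_sliding(raster_array, model, config, spline)
#   arr_to_tif('input.tif', probs.argmax(axis=-1), out_tif='prediction.tif')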
def arr_to_tif(raster_f, segments, out_tif='segment.tif', ndval=-9999):
"""
Save array into GeoTIF file.
Args:
raster_f (str): input data filename
segments (numpy.array): array with values
out_tif (str): output filename
ndval (int): no data value
Return:
save GeoTif to local disk
----------
Example
----------
arr_to_tif('inp.tif', segments, 'out.tif', ndval=-9999)
"""
# get geospatial profile, will apply for output file
with rio.open(raster_f) as src:
meta = src.profile
nodatavals = src.read_masks(1).astype('int16')
print(meta)
# load numpy array if file is given
if type(segments) == str:
segments = np.load(segments)
segments = segments.astype('int16')
print(segments.dtype) # check datatype
nodatavals[nodatavals == 0] = ndval
segments[nodatavals == ndval] = nodatavals[nodatavals == ndval]
out_meta = meta # modify profile based on numpy array
out_meta['count'] = 1 # output is single band
out_meta['dtype'] = 'int16' # output data type is int16
# write to a raster
with rio.open(out_tif, 'w', **out_meta) as dst:
dst.write(segments, 1)
``` |
{
"source": "jordancaraballo/xrasterlib",
"score": 2
} |
#### File: xrasterlib/archives/DgFile.py
```python
from datetime import datetime
import os, subprocess
import shutil
import xml.etree.ElementTree as ET
from osgeo.osr import SpatialReference
from osgeo import gdal
from EvhrEngine.management.GdalFile import GdalFile
#from EvhrEngine.management.SystemCommand import SystemCommand
#-------------------------------------------------------------------------------
# class DgFile
#
# This class represents a Digital Globe file. It is a single NITF file or a
# GeoTiff with an XML counterpart. It is unique because of the metadata tags
# within.
#-------------------------------------------------------------------------------
class DgFile(GdalFile):
#---------------------------------------------------------------------------
# __init__
#---------------------------------------------------------------------------
def __init__(self, fileName, logger = None):
# Check that the file is NITF or TIFF
extension = os.path.splitext(fileName)[1]
if extension != '.ntf' and extension != '.tif':
raise RuntimeError('{} is not a NITF or TIFF file'.format(fileName))
self.extension = extension
# Ensure the XML file exists.
xmlFileName = fileName.replace(self.extension, '.xml')
if not os.path.isfile(xmlFileName):
raise RuntimeError('{} does not exist'.format(xmlFileName))
self.xmlFileName = xmlFileName
# Initialize the base class.
super(DgFile, self).__init__(fileName, logger)
# These data member require the XML file counterpart to the TIF.
tree = ET.parse(self.xmlFileName)
self.imdTag = tree.getroot().find('IMD')
if self.imdTag is None:
raise RuntimeError('Unable to locate the "IMD" tag in ' + \
self.xmlFileName)
# If srs from GdalFile is empty, set srs, and get coords from the .xml
if not self.srs:
self.srs = SpatialReference()
self.srs.ImportFromEPSG(4326)
"""
Below is a temporary fix until ASP fixes dg_mosaic bug:
dg_mosaic outputs, along with a strip .tif, an aggregate .xml
file for all scene inputs. The .tif has no projection information,
so we have to get that from the output .xml. All bands *should*
have same extent in the .xml but a bug with ASP does not ensure
this is always true
for 4-band mosaics, the output extent is consistent among all bands
for 8-band mosaics, the first band (BAND_C) is not updated in the
output .xml, so we have to use second band (BAND_B).
# if no bug, first BAND tag will work for 8-band, 4-band, 1-band
bandTag = [n for n in self.imdTag.getchildren() if \
n.tag.startswith('BAND_')][0]
"""
try:
bandTag = [n for n in self.imdTag.getchildren() if \
n.tag.startswith('BAND_B')][0]
except IndexError: # Pan only has BAND_P
bandTag = [n for n in self.imdTag.getchildren() if \
n.tag.startswith('BAND_P')][0]
self.ulx = min(float(bandTag.find('LLLON').text), \
float(bandTag.find('ULLON').text))
self.uly = max(float(bandTag.find('ULLAT').text), \
float(bandTag.find('URLAT').text))
self.lrx = max(float(bandTag.find('LRLON').text), \
float(bandTag.find('URLON').text))
self.lry = min(float(bandTag.find('LRLAT').text), \
float(bandTag.find('LLLAT').text))
GdalFile.validateCoordinates(self) # Lastly, validate coordinates
# bandNameList
try:
self.bandNameList = \
[n.tag for n in self.imdTag if n.tag.startswith('BAND_')]
except:
self.bandNameList = None
# numBands
try:
self.numBands = self.dataset.RasterCount
except:
self.numBands = None
self.footprintsGml = None
#---------------------------------------------------------------------------
# abscalFactor()
#---------------------------------------------------------------------------
def abscalFactor(self, bandName):
if isinstance(bandName, str) and bandName.startswith('BAND_'):
return float(self.imdTag.find(bandName).find('ABSCALFACTOR').text)
else:
raise RuntimeError('Could not retrieve abscal factor.')
#---------------------------------------------------------------------------
# cloudCover()
#---------------------------------------------------------------------------
def cloudCover(self):
try:
cc = self.imdTag.find('IMAGE').find('CLOUDCOVER').text
if cc is None:
cc = self.dataset.GetMetadataItem('NITF_PIAIMC_CLOUDCVR')
return float(cc)
except:
return None
#---------------------------------------------------------------------------
# effectiveBandwidth()
#---------------------------------------------------------------------------
def effectiveBandwidth(self, bandName):
if isinstance(bandName, str) and bandName.startswith('BAND_'):
return float(self.imdTag. \
find(bandName). \
find('EFFECTIVEBANDWIDTH').text)
else:
raise RuntimeError('Could not retrieve effective bandwidth.')
#---------------------------------------------------------------------------
# firstLineTime()
#---------------------------------------------------------------------------
def firstLineTime(self):
try:
t = self.dataset.GetMetadataItem('NITF_CSDIDA_TIME')
if t is not None:
return datetime.strptime(t, "%Y%m%d%H%M%S")
else:
t = self.imdTag.find('IMAGE').find('FIRSTLINETIME').text
return datetime.strptime(t, "%Y-%m-%dT%H:%M:%S.%fZ")
except:
return None
#---------------------------------------------------------------------------
# getBand()
#---------------------------------------------------------------------------
def getBand(self, outputDir, bandName):
gdalBandIndex = int(self.bandNameList.index(bandName)) + 1
baseName = os.path.basename(self.fileName.replace(self.extension, \
'_b{}.tif'.format(gdalBandIndex)))
tempBandFile = os.path.join(outputDir, baseName)
if not os.path.exists(tempBandFile):
cmd = 'gdal_translate' + \
' -b {}'.format(gdalBandIndex) + \
' -a_nodata 0' + \
' -strict' + \
' -mo "bandName={}"'.format(bandName) + \
' ' + self.fileName + \
' ' + tempBandFile
# removed this to avoid using the SystemCommand class.
# we might be able to do this in a pythonic way instead.
#sCmd = SystemCommand(cmd, self.fileName, self.logger)
#if sCmd.returnCode:
# tempBandFile = None
# Copy scene .xml to accompany the extracted .tif (needed for dg_mosaic)
shutil.copy(self.xmlFileName, tempBandFile.replace('.tif', '.xml'))
return tempBandFile
#---------------------------------------------------------------------------
# getBandName()
#---------------------------------------------------------------------------
def getBandName(self):
try:
return self.dataset.GetMetadataItem('bandName')
except:
return None
#---------------------------------------------------------------------------
# getCatalogId()
#---------------------------------------------------------------------------
def getCatalogId(self):
return self.imdTag.findall('./IMAGE/CATID')[0].text
#---------------------------------------------------------------------------
# getField
#---------------------------------------------------------------------------
def getField(self, nitfTag, xmlTag):
try:
value = self.dataset.GetMetadataItem(nitfTag)
if not value:
value = self.imdTag.find('IMAGE').find(xmlTag).text
return float(value)
except:
return None
#---------------------------------------------------------------------------
# getStripName()
#---------------------------------------------------------------------------
def getStripName(self):
try:
prodCode = None
if self.specTypeCode() == 'MS':
prodCode = 'M1BS'
else:
prodCode = 'P1BS'
dateStr = '{}{}{}'.format(self.year(),
str(self.firstLineTime().month).zfill(2),
str(self.firstLineTime().day).zfill(2))
return '{}_{}_{}_{}'.format(self.sensor(),
dateStr,
prodCode,
self.getCatalogId())
except:
return None
#---------------------------------------------------------------------------
# isMultispectral()
#---------------------------------------------------------------------------
def isMultispectral(self):
return self.specTypeCode() == 'MS'
#---------------------------------------------------------------------------
# isPanchromatic()
#---------------------------------------------------------------------------
def isPanchromatic(self):
return self.specTypeCode() == 'PAN'
#---------------------------------------------------------------------------
# meanSatelliteAzimuth
#---------------------------------------------------------------------------
def meanSatelliteAzimuth(self):
return self.getField('NITF_CSEXRA_AZ_OF_OBLIQUITY', 'MEANSATAZ')
#---------------------------------------------------------------------------
# meanSatelliteElevation
#---------------------------------------------------------------------------
def meanSatelliteElevation(self):
return self.getField('', 'MEANSATEL')
#---------------------------------------------------------------------------
# meanSunAzimuth
#---------------------------------------------------------------------------
def meanSunAzimuth(self):
return self.getField('NITF_CSEXRA_SUN_AZIMUTH', 'MEANSUNAZ')
#---------------------------------------------------------------------------
# meanSunElevation()
#---------------------------------------------------------------------------
def meanSunElevation(self):
return self.getField('NITF_CSEXRA_SUN_ELEVATION', 'MEANSUNEL')
#---------------------------------------------------------------------------
# prodLevelCode()
#---------------------------------------------------------------------------
def prodLevelCode(self):
try:
return self.imdTag.find('PRODUCTLEVEL').text
except:
return None
#---------------------------------------------------------------------------
# sensor()
#---------------------------------------------------------------------------
def sensor(self):
try:
sens = self.dataset.GetMetadataItem('NITF_PIAIMC_SENSNAME')
if sens is None:
sens = self.imdTag.find('IMAGE').find('SATID').text
return sens
except:
return None
#---------------------------------------------------------------------------
# setBandName()
#---------------------------------------------------------------------------
def setBandName(self, bandName):
self.dataset.SetMetadataItem("bandName", bandName)
#---------------------------------------------------------------------------
# specTypeCode()
#---------------------------------------------------------------------------
def specTypeCode(self):
try:
stc = self.dataset.GetMetadataItem('NITF_CSEXRA_SENSOR')
if stc is None:
if self.imdTag.find('BANDID').text == 'P':
stc = 'PAN'
elif self.imdTag.find('BANDID').text == 'MS1' or \
self.imdTag.find('BANDID').text == 'Multi':
stc = 'MS'
return stc
except:
return None
#---------------------------------------------------------------------------
# toBandInterleavedBinary()
#---------------------------------------------------------------------------
def toBandInterleavedBinary(self, outputDir):
outBin = os.path.join(outputDir,
os.path.basename(self.fileName.replace(self.extension, '.bin')))
try:
ds = gdal.Open(self.fileName)
ds = gdal.Translate(outBin, ds, creationOptions = ["INTERLEAVE=BAND"])
ds = None
return outBin
except:
return None
#---------------------------------------------------------------------------
# year()
#---------------------------------------------------------------------------
def year(self):
try:
yr = self.dataset.GetMetadataItem('NITF_CSDIDA_YEAR')
if yr is None:
yr = self.firstLineTime().year
return yr
except:
return None
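# Brief usage sketch for DgFile (the file name is hypothetical and assumes a matching .xml next to it):
#   dg = DgFile('WV02_20170427_M1BS_example.tif')
#   print(dg.sensor(), dg.getCatalogId(), dg.cloudCover())
#   print(dg.isMultispectral(), dg.firstLineTime(), dg.getStripName())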
```
#### File: xrasterlib/archives/train_apply_RandomForests__csv.py
```python
# Import GDAL, NumPy, and matplotlib
import sys, os
from osgeo import gdal, gdal_array
import numpy as np
#import matplotlib.pyplot as plt
##%matplotlib inline # IPython
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
import pandas as pd
#import skimage.io as io
from timeit import default_timer as timer
# Tell GDAL to throw Python exceptions, and register all drivers
gdal.UseExceptions()
gdal.AllRegister()
##import gdal
##from osgeo.gdal import *
gdal.UseExceptions() # enable exceptions to report errors
drvtif = gdal.GetDriverByName("GTiff")
n_trees = 20
max_feat = 'log2'
modelName = '{}_{}'.format(n_trees, max_feat) # to distinguish between parameters of each model/output
extentName = sys.argv[1] # model number or name (can be blank) so we can save to different directories
# Hardcode for now:
bands = [1, 2, 3, 4, 5] # bands of the image stack #ANDREW
band_names = ['Blue', 'Green', 'Red', 'NIR', 'NDVI'] # the order of the stack #ANDREW
def find_elapsed_time(start, end): # example time = round(find_elapsed_time(start, end),3) where start and end = timer()
elapsed_min = (end-start)/60
return float(elapsed_min)
"""Function to read data stack into img object"""
def stack_to_obj(VHRstack):
img_ds = gdal.Open(VHRstack, gdal.GA_ReadOnly) # GDAL dataset
gt = img_ds.GetGeoTransform()
proj = img_ds.GetProjection()
ncols = img_ds.RasterXSize
nrows = img_ds.RasterYSize
ndval = img_ds.GetRasterBand(1).GetNoDataValue() # should be -999 for all layers, unless using scene as input
imgProperties = (gt, proj, ncols, nrows, ndval)
""" Read data stack into array """
img = np.zeros((nrows, ncols, img_ds.RasterCount), gdal_array.GDALTypeCodeToNumericTypeCode(img_ds.GetRasterBand(1).DataType))
for b in range(img.shape[2]): # the 3rd index of img.shape gives us the number of bands in the stack
print '\nb: {}'.format(b)
img[:, :, b] = img_ds.GetRasterBand(b + 1).ReadAsArray() # GDAL is 1-based while Python is 0-based
return (img, imgProperties)
"""Function to write final classification to tiff"""
def array_to_tif(inarr, outfile, imgProperties):
# get properties from input
(gt, proj, ncols, nrows, ndval) = imgProperties
print ndval
drv = drvtif.Create(outfile, ncols, nrows, 1, 3, options = [ 'COMPRESS=LZW' ]) # 1= number of bands (i think) and 3 = Data Type (16 bit signed)
drv.SetGeoTransform(gt)
drv.SetProjection(proj)
drv.GetRasterBand(1).SetNoDataValue(ndval)
drv.GetRasterBand(1).WriteArray(inarr)
return outfile
"""Function to run diagnostics on model"""
def run_diagnostics(model_save, X, y): # where model is the model object, X and y are training sets
# load model for use:
print "\nLoading model from {} for cross-val".format(model_save)
model_load = joblib.load(model_save) # nd load
print "\n\nDIAGNOSTICS:\n"
try:
print "n_trees = {}".format(n_trees)
print "max_features = {}\n".format(max_feat)
except Exception as e:
print "ERROR: {}\n".format(e)
# check Out of Bag (OOB) prediction score
print 'Our OOB prediction of accuracy is: {}\n'.format(model_load.oob_score_ * 100)
print "OOB error: {}\n".format(1 - model_load.oob_score_)
# check the importance of the bands:
for b, imp in zip(bands, model_load.feature_importances_):
print 'Band {b} ({name}) importance: {imp}'.format(b=b, name=band_names[b-1], imp=imp)
print ''
"""
# see http://scikit-learn.org/stable/modules/cross_validation.html for how to use rf.score etc
"""
# dont really know if this is applicable for 2 classes but try it anyway:
# Setup a dataframe -- just like R
df = pd.DataFrame() #**** Need to create a new y with validation points, like we did with y in the function below (make roi be valid sites array instead of training)
df['truth'] = y
df['predict'] = model_load.predict(X)
# Cross-tabulate predictions
print pd.crosstab(df['truth'], df['predict'], margins=True)
## print "Other:"
## print model.criterion
## print model.estimator_params
## print model.score
## print model.feature_importances_
## print ''
def apply_model(VHRstack, classDir, model_save): # VHR stack we are applying model to, output dir, and saved model
(img, imgProperties) = stack_to_obj(VHRstack)
(gt, proj, ncols, nrows, ndval) = imgProperties # ndval is nodata val of image stack not sample points
"""
print img
print img.shape
print np.unique(img)
"""
# Classification of img array and save as image (5 refers to the number of bands in the stack)
# reshape into long 2d array (nrow * ncol, nband) for classification
new_shape = (img.shape[0] * img.shape[1], img.shape[2])
img_as_array = img[:, :, :img.shape[2]].reshape(new_shape) # 5 is number of layers
## print img_as_array.shape # (192515625, 5)
## print np.unique(img_as_array) # [ -999 -149 -146 ..., 14425 14530 14563]
print 'Reshaped from {o} to {n}'.format(o=img.shape, n=img_as_array.shape)
print "\nLoading model from {}".format(model_save)
model_load = joblib.load(model_save) # nd load
print "\nModel information:\n{}".format(model_load)
# Now predict for each pixel
print "\nPredicting model on image array"
class_prediction = model_load.predict(img_as_array)
#* at some point may need to convert values that were -999 in img array back to -999, depending on what rf does to those areas
## print img[:, :, 0].shape # (13875, 13875)
## print img[:, :, 0]
# Reshape our classification map and convert to 16-bit signed
class_prediction = class_prediction.reshape(img[:, :, 0].shape).astype(np.int16)
## print class_prediction # numpy array? what?
## print class_prediction.shape # (13875, 13875)
## print class_prediction.dtype #uint8
## print np.unique(class_prediction) # [1 2]
## print img.shape # (13875, 13875, 5)
## print np.unique(img) # [ -999 -149 -146 ..., 14425 14530 14563]
# Now we need to convert existing NoData values back to NoData (-999, or 0 if at scene-level)
class_prediction[img[:, :, 0] == ndval] = ndval # just chose the 0th index to find where noData values are (should be the same for all MS layers, not sure about Pan)
# use from old method to save to tif
## print np.unique(class_prediction)
## print ndval
# export classificaiton to tif
classification = os.path.join(classDir, "{}__{}__classified.tif".format(extentName, modelName))
array_to_tif(class_prediction, classification, imgProperties)
## io.imsave(classification, class_prediction)
print "\nWrote map output to {}".format(classification)
return
"""Function for training the model using training data"""
# To train the model, you need: input text file/csv, model output location, model parameters
def train_model(X, y, modelDir, n_trees, max_feat):
n_samples = np.shape(X)[0] # the first index of the shape of X will tell us how many sample points
print '\nWe have {} samples'.format(n_samples)
labels = np.unique(y) # now it's the unique values in y array from text file
print 'The training data include {n} classes: {classes}'.format(n=labels.size, classes=labels)
print 'Our X matrix is sized: {sz}'.format(sz=X.shape) # hopefully will be (?, 4)
print 'Our y array is sized: {sz}'.format(sz=y.shape)
""" Now train the model """
print "\nInitializing model..."
rf = RandomForestClassifier(n_estimators=n_trees, max_features=max_feat, oob_score=True) # can change/add other settings later
print "\nTraining model..."
rf.fit(X, y) # fit model to training data
print 'score:', rf.oob_score_
# Export model:
try:
model_save = os.path.join(modelDir, "model_{}_{}.pkl".format(extentName, modelName))
joblib.dump(rf, model_save)
except Exception as e:
print "Error: {}".format(e)
return model_save # Return output model for application and validation
def get_test_training_sets(inputCsv):
with open(inputCsv, 'r') as it:
cnt = 0
for r in it.readlines():
cnt += 1
rline = r.strip('\n').strip().split(',')
xx = np.array(rline[0:-1]) # xx is the line except the last entry (class)
yy = np.array(rline[-1]) # yy is the last entry in the line
if cnt == 1:
X = [xx]
y = yy
else:
X = np.vstack((X, xx))
y = np.append(y, yy)
# Convert y array to integer type
y = y.astype(float).astype(int)
# Now we have X and y, but this is not split into validation and training. Do that here:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state = 21)
return (X_train, X_test, y_train, y_test)
"""Function to convert stack and sample imagery into X and y [not yet split into training/validation]"""
def convert_img_to_Xy_data(VHRstack, sampleTif): # Given a data stack and sample tiff, we can convert these into X and y for Random Forests
""" Read in the raster stack and training data to numpy array: """
img_ds = gdal.Open(VHRstack, gdal.GA_ReadOnly) # GDAL dataset
roi_ds = gdal.Open(sampleTif, gdal.GA_ReadOnly)
""" get geo metadata so we can write the classification back to tiff """
gt = img_ds.GetGeoTransform()
proj = img_ds.GetProjection()
ncols = img_ds.RasterXSize
nrows = img_ds.RasterYSize
ndval = img_ds.GetRasterBand(1).GetNoDataValue() # should be -999 for all layers, unless using scene as input
imgProperties = (gt, proj, ncols, nrows, ndval)
""" Read data stack into array """
img = np.zeros((nrows, ncols, img_ds.RasterCount), gdal_array.GDALTypeCodeToNumericTypeCode(img_ds.GetRasterBand(1).DataType))
for b in range(img.shape[2]): # the 3rd index of img.shape gives us the number of bands in the stack
print '\nb: {}'.format(b)
img[:, :, b] = img_ds.GetRasterBand(b + 1).ReadAsArray() # GDAL is 1-based while Python is 0-based
""" Read Training dataset into numpy array """
roi = roi_ds.GetRasterBand(1).ReadAsArray().astype(np.uint8)
roi_nd = roi_ds.GetRasterBand(1).GetNoDataValue() # get no data value of training dataset
roi[roi==roi_nd] = 0 # Convert the No Data pixels in raster to 0 for the model
X = img[roi > 0, :] # Data Stack pixels
y = roi[roi > 0] # Training pixels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state = 21)
return (X_train, X_test, y_train, y_test)
#def main():
start = timer()
print "Running"
# Set up directories:
ddir = '/att/gpfsfs/briskfs01/ppl/mwooten3/MCC/RandomForests'
testdir = os.path.join(ddir, 'test')
VHRdir = testdir
trainDir = testdir
modelDir = testdir
classDir = testdir
logDir = testdir
"""
for d in [modelDir, classDir, logDir]:
os.system('mkdir -p {}'.format(d))
print "Running"
# Log processing:
logfile = os.path.join(logDir, 'Log_{}trees_{}.txt'.format(n_trees, max_feat))
so = se = open(logfile, 'w', 0) # open our log file
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # re-open stdout without buffering
os.dup2(so.fileno(), sys.stdout.fileno()) # redirect stdout and stderr to the log file opened above
os.dup2(se.fileno(), sys.stderr.fileno())
"""
# Get the raster stack and sample data
VHRstack = os.path.join(VHRdir, 'WV02_20170427_M1BS_103001006855B300-toa__stack.tif') # Stack of VHR data (Multispec and Pan)
## trainTif = os.path.join(trainDir, 'training__{}.tif'.format(extentName)) # Training Data
## validTif = os.path.join(trainDir, 'valid__{}.tif'.format(extentName)) # Validation Data
# IF input is csv file, use this method
"""Before calling the model train or apply, read and configure the inputs into test and train """
inCsv = os.path.join(trainDir, 'WV02_20170427_M1BS_103001006855B300-toa__training.csv')
(X_train, X_test, y_train, y_test) = get_test_training_sets(inCsv)
# Train and apply models:
print "Building model with n_trees={} and max_feat={}...".format(n_trees, max_feat)
model_save = train_model(X_train, y_train, modelDir, n_trees, max_feat)
print "\nApplying model to rest of imagery"
apply_model(VHRstack, classDir, model_save)
run_diagnostics(model_save, X_test, y_test)
elapsed = round(find_elapsed_time(start, timer()),3)
print "\n\nElapsed time = {}".format(elapsed)
#main()
```
#### File: examples/rasters/wv_indices.py
```python
from terragpu import io
from terragpu import engine
from terragpu.array.raster import Raster
from terragpu.indices.wv_indices import add_indices
from terragpu.engine import array_module, df_module
xp = array_module()
xf = df_module()
def main(filename, bands):
# Read imagery
raster = io.imread(filename, bands)
print(raster)
# Calculate some indices
raster = add_indices(raster, indices=[
'dvi', 'ndvi', 'cs1', 'cs2', 'si', 'fdi', 'dwi',
'ndwi', 'gndvi', 'sr'])
print(raster)
# Save to directory
io.imsave(raster, "/lscratch/jacaraba/output.tif", crs="EPSG:32618")
return
if __name__ == '__main__':
# filename to open
filename = '/att/nobackup/jacaraba/AGU2021/terragpu/terragpu/test/data/WV02_Gonji.tif'
bands = [
'CoastalBlue',
'Blue',
'Green',
'Yellow',
'Red',
'RedEdge',
'NIR1',
'NIR2'
]
# Start dask cluster - dask scheduler must be started from main
if xp.__name__ == 'cupy':
engine.configure_dask(
device='gpu',
n_workers=4,
local_directory='/lscratch/jacaraba')
# Execute main function and calculate indices
main(filename, bands)
```
#### File: xrasterlib/scripts/relabel.py
```python
import numpy as np
import tifffile as tiff
import rasterio as rio
import cv2
def npy_to_tif(raster_f='image.tif', segments='segment.npy',
outtif='segment.tif'
):
# get geospatial profile, will apply for output file
with rio.open(raster_f) as src:
meta = src.profile
print(meta)
# load numpy array if file is given
if type(segments) == str:
segments = np.load(segments)
segments = segments.astype('int16')
print(segments.dtype) # check datatype
out_meta = meta # modify profile based on numpy array
out_meta['count'] = 1 # output is single band
out_meta['dtype'] = 'int16' # output data type is int16
# write to a raster
with rio.open(outtif, 'w', **out_meta) as dst:
dst.write(segments, 1)
def int_ov_uni(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
return np.sum(intersection) / np.sum(union)
image = 'subsetLarge.tif'
mask = tiff.imread(image)
x_seg = cv2.resize(mask, (mask.shape[0], mask.shape[1]),
interpolation=cv2.INTER_NEAREST)
kk = (50, 50)
kk = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
# kernelc = np.ones(kk,dtype='uint8')
# kerneld = np.ones(kk,dtype='uint8')
# kernele = np.ones(kk,dtype='uint8')
closing = cv2.morphologyEx(x_seg, cv2.MORPH_CLOSE, kk)
dilation = cv2.dilate(x_seg, kk, iterations=5)
erosion = cv2.erode(x_seg, kk, iterations=1)
tophat = cv2.morphologyEx(x_seg, cv2.MORPH_TOPHAT, kk)
gradient = cv2.morphologyEx(x_seg, cv2.MORPH_GRADIENT, kk)
blackhat = cv2.morphologyEx(x_seg, cv2.MORPH_BLACKHAT, kk)
opening = cv2.morphologyEx(x_seg, cv2.MORPH_OPEN, kk)
kkk = np.ones((5, 5), np.float32)/25
smooth = cv2.filter2D(x_seg, -1, kkk)
npy_to_tif(raster_f=image, segments=closing, outtif='relabeled-cv2c.tif')
npy_to_tif(raster_f=image, segments=dilation, outtif='relabeled-cv2d.tif')
npy_to_tif(raster_f=image, segments=erosion, outtif='relabeled-cv2-ers.tif')
npy_to_tif(raster_f=image, segments=tophat, outtif='relabeled-cv2-th.tif')
npy_to_tif(raster_f=image, segments=gradient, outtif='relabeled-cv2-grad.tif')
npy_to_tif(raster_f=image, segments=blackhat, outtif='relabeled-cv2-bh.tif')
npy_to_tif(raster_f=image, segments=opening, outtif='relabeled-cv2-open.tif')
npy_to_tif(raster_f=image, segments=smooth, outtif='relabeled-cv2-smooth.tif')
print("unique labels: ", np.unique(mask))
```
#### File: ai/deep_learning/loss.py
```python
import logging
import numpy as np
from tensorflow.keras import backend as K
import tensorflow as tf
__author__ = "<NAME>, Science Data Processing Branch"
__email__ = "<EMAIL>"
__status__ = "Production"
# ---------------------------------------------------------------------------
# module metrics
#
# General functions to compute custom losses.
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Module Methods
# ---------------------------------------------------------------------------
# ------------------------------ Loss Functions -------------------------- #
def dice_coef_bin_loss(y_true, y_pred):
return 1.0 - dice_coef_bin(y_true, y_pred)
def dice_coef_bin(y_true, y_pred, smooth=1e-7):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / \
(K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef(y_true, y_pred, smooth=1e-7):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / \
(K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred, numLabels=6):
dice = 0
for index in range(numLabels):
dice -= dice_coef(y_true[:, :, :, index],
y_pred[:, :, :, index]
)
return dice
def generalized_dice(y_true, y_pred, exp):
GD = []
print(K.shape(y_pred))
smooth = 1.0
for i in range(y_pred.shape[2]):
y_true_per_label = K.cast(
K.not_equal(y_true[:, :, i], 0), K.floatx()
) # Change to binary
y_pred_per_label = y_pred[:, :, i]
weight = K.pow(1/K.sum(y_true_per_label, axis=1), exp)
intersection = K.sum(y_true_per_label * y_pred_per_label, axis=1)
union = K.sum(y_true_per_label + y_pred_per_label, axis=1)
GD.append(weight * (2. * intersection + smooth) / (union + smooth))
GD_out = K.stack(GD)
return GD_out
def exp_dice_loss(exp=1.0):
"""
:param exp: exponent. 1.0 for no exponential effect, i.e. log Dice.
"""
def inner(y_true, y_pred):
"""
Computes the average exponential log Dice coefficients
:param y_true: one-hot tensor * by label weights, (bsize, n_pix, n_lab)
:param y_pred: softmax probabilities, same shape as y_true
:return: average exponential log Dice coefficient.
"""
dice = dice_coef(y_true, y_pred)
dice = K.clip(dice, K.epsilon(), 1 - K.epsilon())
dice = K.pow(-K.log(dice), exp)
if K.ndim(dice) == 2:
dice = K.mean(dice, axis=-1)
return dice
return inner
def ce_dl_bin(y_true, y_pred):
def dice_loss(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return 1 - (2. * intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
return K.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
def jaccard_distance_loss(y_true, y_pred, numLabels=6, smooth=100):
"""
Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
The Jaccard distance loss is useful for unbalanced datasets.
This has been shifted so it converges on 0 and is smoothed to
avoid exploding or disappearing gradients.
Ref: https://en.wikipedia.org/wiki/Jaccard_index
@url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
@author: wassname
Modified by jordancaraballo to support one-hot tensors.
"""
jacloss = 0
for index in range(numLabels):
y_true_f = K.flatten(y_true[:, :, :, index])
y_pred_f = K.flatten(y_pred[:, :, :, index])
intersection = K.sum(K.abs(y_true_f * y_pred_f))
sum_ = K.sum(K.abs(y_true_f) + K.abs(y_pred_f))
jac = (intersection + smooth) / (sum_ - intersection + smooth)
jacloss += (1 - jac) * smooth
return jacloss
def tanimoto_loss(label, pred):
"""
Softmax version of tanimoto loss.
T(pij, lij) = [ ∑_{J=1..Nc} wj * ∑_{i=1..Np} (pij * lij) ]
/ [ ∑_{J=1..Nc} wj * ∑_{i=1..Np} (pij^2 + lij^2 - pij * lij) ]
where:
Nc = n classes; Np = n pixels i; wj = weights per class J
pij = probability of pixel for class J
lij = label of pixel for class J
wj can be calculated straight from the last layer or using
wj = (Vj)^-2, where Vj is the total sum of true positives per class
"""
square = tf.square(pred)
sum_square = tf.reduce_sum(square, axis=-1)
product = tf.multiply(pred, label)
sum_product = tf.reduce_sum(product, axis=-1)
denominator = tf.subtract(tf.add(sum_square, 1), sum_product)
loss = tf.divide(sum_product, denominator)
loss = tf.reduce_mean(loss)
return 1.0-loss
def tanimoto_dual_loss(label, pred):
"""
Dual Tanimoto loss
~
T(pij,lij) = ( Tanimoto(pij,lij) + Tanimoto(1-pij, 1-lij) ) / 2.0
"""
loss1 = tanimoto_loss(pred, label)
pred = tf.subtract(1.0, pred)
label = tf.subtract(1.0, label)
loss2 = tanimoto_loss(pred, label)
loss = (loss1+loss2)/2.0
return loss
def focal_loss_bin(gamma=2., alpha=.25):
"""
Binary form of focal loss.
FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t)
where p = sigmoid(x), p_t = p or 1 - p depending on if
the label is 1 or 0, respectively.
References:
https://arxiv.org/pdf/1708.02002.pdf
Usage:
model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)],
metrics=["accuracy"], optimizer=adam)
"""
def focal_loss_bin_fixed(y_true, y_pred):
"""
:param y_true: A tensor of the same shape as `y_pred`
:param y_pred: A tensor resulting from a sigmoid
:return: Output tensor.
"""
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
epsilon = K.epsilon()
# clip to prevent NaN's and Inf's
pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)
return - K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
- K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
return focal_loss_bin_fixed
def focal_loss_cat(gamma=2., alpha=.25):
"""
Softmax version of focal loss.
m
FL = ∑ -alpha * (1 - p_o,c)^gamma * y_o,c * log(p_o,c)
c=1
where m = number of classes, c = class and o = observation
Parameters:
alpha -- the same as weighing factor in balanced cross entropy
gamma -- focusing parameter for modulating factor (1-p)
Default value:
gamma -- 2.0 as mentioned in the paper
alpha -- 0.25 as mentioned in the paper
References:
Official paper: https://arxiv.org/pdf/1708.02002.pdf
https://www.tensorflow.org/api_docs/python/tf/keras/backend/categorical_crossentropy
Usage:
model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)],
metrics=["accuracy"], optimizer=adam)
"""
def focal_loss_cat_fixed(y_true, y_pred):
"""
:param y_true: A tensor of the same shape as `y_pred`
:param y_pred: A tensor resulting from a softmax
:return: Output tensor.
"""
# Scale predictions so that the class probas of each sample sum to 1
y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
# Clip the prediction value to prevent NaN's and Inf's
epsilon = K.epsilon()
y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
# Calculate Cross Entropy
cross_entropy = -y_true * K.log(y_pred)
# Calculate Focal Loss
loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
# Compute mean loss in mini_batch
return K.mean(loss, axis=1)
return focal_loss_cat_fixed
def tversky_negative(y_true, y_pred, smooth=1, alpha=0.7):
y_true_pos = K.flatten(y_true)
y_pred_pos = K.flatten(y_pred)
true_pos = K.sum(y_true_pos * y_pred_pos)
false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
return (true_pos + smooth) / \
(true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)
def tversky_loss(y_true, y_pred):
return 1 - tversky(y_true, y_pred)
def focal_tversky_loss(y_true, y_pred, gamma=0.75):
tv = tversky(y_true, y_pred)
return K.pow((1 - tv), gamma)
def tversky(y_true, y_pred, alpha=0.6, beta=0.4):
"""
Function to calculate the Tversky loss for imbalanced data
:param prediction: the logits
:param ground_truth: the segmentation ground_truth
:param alpha: weight of false positives
:param beta: weight of false negatives
:param weight_map:
:return: the loss
"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
# weights
y_weights = y_true[..., 1]
y_weights = y_weights[..., np.newaxis]
ones = 1
p0 = y_pred # proba that voxels are class i
p1 = ones - y_pred # proba that voxels are not class i
g0 = y_t
g1 = ones - y_t
tp = tf.reduce_sum(y_weights * p0 * g0)
fp = alpha * tf.reduce_sum(y_weights * p0 * g1)
fn = beta * tf.reduce_sum(y_weights * p1 * g0)
EPSILON = 0.00001
numerator = tp
denominator = tp + fp + fn + EPSILON
score = numerator / denominator
return 1.0 - tf.reduce_mean(score)
def true_positives(y_true, y_pred):
"""compute true positive"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
return K.round(y_t * y_pred)
def false_positives(y_true, y_pred):
"""compute false positive"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
return K.round((1 - y_t) * y_pred)
def true_negatives(y_true, y_pred):
"""compute true negative"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
return K.round((1 - y_t) * (1 - y_pred))
def false_negatives(y_true, y_pred):
"""compute false negative"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
return K.round((y_t) * (1 - y_pred))
def sensitivity(y_true, y_pred):
"""compute sensitivity (recall)"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
tp = true_positives(y_t, y_pred)
fn = false_negatives(y_t, y_pred)
return K.sum(tp) / (K.sum(tp) + K.sum(fn))
def specificity(y_true, y_pred):
"""compute specificity (precision)"""
y_t = y_true[..., 0]
y_t = y_t[..., np.newaxis]
tn = true_negatives(y_t, y_pred)
fp = false_positives(y_t, y_pred)
return K.sum(tn) / (K.sum(tn) + K.sum(fp))
# -------------------------------------------------------------------------------
# module model Unit Tests
# -------------------------------------------------------------------------------
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# Add unit tests here
```
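The loss and metric functions above all follow the standard Keras `(y_true, y_pred)` signature, so they can be handed straight to `model.compile`. A minimal, hypothetical sketch of that wiring follows; the tiny model is a stand-in and not part of this module, and TensorFlow/Keras is assumed to be installed.

```python
# Hypothetical usage sketch: wiring the losses above into a Keras model.
# Assumes the functions in the module above are importable; the model is
# illustrative only and not taken from the original code.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu',
                           input_shape=(128, 128, 4)),
    tf.keras.layers.Conv2D(1, 1, activation='sigmoid'),
])

# Binary segmentation: combined cross-entropy + Dice loss, Dice as a metric.
model.compile(optimizer='adam', loss=ce_dl_bin, metrics=[dice_coef_bin])

# The focal-loss helpers are factories that return the actual loss closure.
model.compile(optimizer='adam', loss=focal_loss_bin(gamma=2., alpha=.25))
```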
#### File: deep_learning/models/unet_model.py
```python
import torch
from pytorch_lightning import LightningModule
from torch.nn import functional as F
from pl_bolts.models.vision.unet import UNet
from pytorch_lightning.utilities.cli import MODEL_REGISTRY
from torchmetrics import MetricCollection, Accuracy, IoU
# -------------------------------------------------------------------------------
# class UNet
# This class performs training and classification of satellite imagery using a
# UNet CNN.
# -------------------------------------------------------------------------------
@MODEL_REGISTRY
class UNetSegmentation(LightningModule):
# ---------------------------------------------------------------------------
# __init__
# ---------------------------------------------------------------------------
def __init__(
self,
input_channels: int = 4,
num_classes: int = 19,
num_layers: int = 5,
features_start: int = 64,
bilinear: bool = False,
):
super().__init__()
self.input_channels = input_channels
self.num_classes = num_classes
self.num_layers = num_layers
self.features_start = features_start
self.bilinear = bilinear
self.net = UNet(
input_channels=self.input_channels,
num_classes=num_classes,
num_layers=self.num_layers,
features_start=self.features_start,
bilinear=self.bilinear,
)
metrics = MetricCollection(
[
Accuracy(), IoU(num_classes=self.num_classes)
]
)
self.train_metrics = metrics.clone(prefix='train_')
self.val_metrics = metrics.clone(prefix='val_')
# ---------------------------------------------------------------------------
# model methods
# ---------------------------------------------------------------------------
def forward(self, x):
return self.net(x)
def training_step(self, batch, batch_nb):
img, mask = batch
img, mask = img.float(), mask.long()
# Forward step, calculate logits and loss
logits = self(img)
# loss_val = F.cross_entropy(logits, mask)
# Get target tensor from logits for metrics, calculate metrics
probs = torch.nn.functional.softmax(logits, dim=1)
probs = torch.argmax(probs, dim=1)
# metrics_train = self.train_metrics(probs, mask)
# log_dict = {"train_loss": loss_val.detach()}
# return {"loss": loss_val, "log": log_dict, "progress_bar": log_dict}
# return {
# "loss": loss_val, "train_acc": metrics_train['train_Accuracy'],
# "train_iou": metrics_train['train_IoU']
# }
tensorboard_logs = self.train_metrics(probs, mask)
tensorboard_logs['loss'] = F.cross_entropy(logits, mask)
# tensorboard_logs['lr'] = self._get_current_lr()
self.log(
'acc', tensorboard_logs['train_Accuracy'],
sync_dist=True, prog_bar=True
)
self.log(
'iou', tensorboard_logs['train_IoU'],
sync_dist=True, prog_bar=True
)
return tensorboard_logs
    def training_epoch_end(self, outputs):
        # Get average metrics from multi-GPU batch sources; keys match the
        # dict returned by training_step ('loss', 'train_Accuracy', 'train_IoU')
        loss_val = torch.stack([x["loss"] for x in outputs]).mean()
        acc_train = torch.stack([x["train_Accuracy"] for x in outputs]).mean()
        iou_train = torch.stack([x["train_IoU"] for x in outputs]).mean()
        # Send epoch-level averages to the logger
        self.log(
            "loss", loss_val, on_epoch=True, prog_bar=True, logger=True)
        self.log(
            "train_acc", acc_train, on_epoch=True, prog_bar=True, logger=True)
        self.log(
            "train_iou", iou_train, on_epoch=True, prog_bar=True, logger=True)
# # Send output to logger
# self.log(
# "loss", loss_val, on_epoch=True, prog_bar=True, logger=True)
# self.log(
# "train_acc", acc_train, on_epoch=True, prog_bar=True, logger=True)
# self.log(
# "train_iou", iou_train, on_epoch=True, prog_bar=True, logger=True)
def validation_step(self, batch, batch_idx):
# Get data, change type for validation
img, mask = batch
img, mask = img.float(), mask.long()
# Forward step, calculate logits and loss
logits = self(img)
# loss_val = F.cross_entropy(logits, mask)
# Get target tensor from logits for metrics, calculate metrics
probs = torch.nn.functional.softmax(logits, dim=1)
probs = torch.argmax(probs, dim=1)
metrics_val = self.val_metrics(probs, mask)
# return {
# "val_loss": loss_val, "val_acc": metrics_val['val_Accuracy'],
# "val_iou": metrics_val['val_IoU']
# }
tensorboard_logs = self.val_metrics(probs, mask)
tensorboard_logs['val_loss'] = F.cross_entropy(logits, mask)
self.log(
'val_loss', tensorboard_logs['val_loss'],
sync_dist=True, prog_bar=True
)
self.log(
'val_acc', tensorboard_logs['val_Accuracy'],
sync_dist=True, prog_bar=True
)
self.log(
'val_iou', tensorboard_logs['val_IoU'],
sync_dist=True, prog_bar=True
)
return tensorboard_logs
#def validation_epoch_end(self, outputs):
# # Get average metrics from multi-GPU batch sources
# loss_val = torch.stack([x["val_loss"] for x in outputs]).mean()
# acc_val = torch.stack([x["val_acc"] for x in outputs]).mean()
# iou_val = torch.stack([x["val_iou"] for x in outputs]).mean()
# # Send output to logger
# self.log(
# "val_loss", torch.mean(self.all_gather(loss_val)),
# on_epoch=True, prog_bar=True, logger=True)
# self.log(
# "val_acc", torch.mean(self.all_gather(acc_val)),
# on_epoch=True, prog_bar=True, logger=True)
# self.log(
# "val_iou", torch.mean(self.all_gather(iou_val)),
# on_epoch=True, prog_bar=True, logger=True)
# def configure_optimizers(self):
# opt = torch.optim.Adam(self.net.parameters(), lr=self.lr)
# sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10)
# return [opt], [sch]
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self(batch)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
return self(batch)
```
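For context, here is a minimal training sketch (not from the repository) showing the `(image, mask)` batch layout the module expects. It uses synthetic tensors and assumes pytorch_lightning and pl_bolts are installed; note that `configure_optimizers` is left commented out in the class above, so a real run would first restore an optimizer.

```python
# Illustrative only: fit UNetSegmentation on random tensors.
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader, TensorDataset

images = torch.rand(8, 4, 128, 128)             # (N, C, H, W) float imagery
masks = torch.randint(0, 19, (8, 128, 128))     # (N, H, W) integer class labels
loader = DataLoader(TensorDataset(images, masks), batch_size=2)

model = UNetSegmentation(input_channels=4, num_classes=19)
# configure_optimizers is commented out above, so Lightning will warn and run
# without optimization; restore it (e.g. Adam) before real training.
trainer = pl.Trainer(max_epochs=1)
trainer.fit(model, train_dataloaders=loader, val_dataloaders=loader)
```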
#### File: terragpu/ai/preprocessing.py
```python
import random
from typing import Union
from tqdm import tqdm
import math
import xarray as xr
# import tensorflow as tf
from sklearn.utils.class_weight import compute_class_weight
from sklearn import feature_extraction
from terragpu.engine import array_module, df_module
xp = array_module()
xf = df_module()
import numpy as np
# -------------------------------------------------------------------------
# Preprocess methods - Modify
# -------------------------------------------------------------------------
def modify_bands(
img: xr.core.dataarray.DataArray, input_bands: list,
output_bands: list, drop_bands: list = []):
"""
Drop multiple bands to existing rasterio object
"""
# Do not modify if image has the same number of output bands
if img.shape[0] == len(output_bands):
return img
# Drop any bands from input that should not be on output
for ind_id in list(set(input_bands) - set(output_bands)):
drop_bands.append(input_bands.index(ind_id)+1)
img = img.drop(dim="band", labels=drop_bands, drop=True)
return img
def modify_roi(
img: xp.ndarray, label: xp.ndarray,
ymin: int, ymax: int, xmin: int, xmax: int):
"""
Crop ROI, from outside to inside based on pixel address
"""
return img[ymin:ymax, xmin:xmax], label[ymin:ymax, xmin:xmax]
def modify_pixel_extremity(
img: xp.ndarray, xmin: int = 0, xmax: int = 10000):
"""
Crop ROI, from outside to inside based on pixel address
"""
return xp.clip(img, xmin, xmax)
def modify_label_classes(label: xp.ndarray, expressions: str):
"""
Change pixel label values based on expression
Example: "x != 1": 0, convert all non-1 values to 0
"""
for exp in expressions:
[(k, v)] = exp.items()
label[eval(k, {k.split(' ')[0]: label})] = v
return label
# def modify_label_dims(labels, n_classes: int):
# if n_classes > 2:
# if labels.min() == 1:
# labels = labels - 1
# return tf.keras.utils.to_categorical(
# labels, num_classes=n_classes, dtype='float32')
# else:
# return xp.expand_dims(labels, axis=-1).astype(xp.float32)
# -------------------------------------------------------------------------
# Preprocess methods - Get
# -------------------------------------------------------------------------
def get_std_mean(images, output_filename: str):
means = xp.mean(images, axis=tuple(range(images.ndim-1)))
stds = xp.std(images, axis=tuple(range(images.ndim-1)))
xp.savez(output_filename, mean=means, std=stds)
return means, stds
def get_class_weights(labels):
weights = compute_class_weight(
'balanced',
xp.unique(xp.ravel(labels, order='C')),
xp.ravel(labels, order='C'))
return weights
# -------------------------------------------------------------------------
# Preprocess methods - Calculate
# -------------------------------------------------------------------------
def calc_ntiles(
width: int, height: int, tile_size: int, max_patches: float = 1):
if isinstance(max_patches, int):
return max_patches
else:
ntiles = (
(math.ceil(width / tile_size)) * (math.ceil(height / tile_size)))
return int(round(ntiles * max_patches))
# -------------------------------------------------------------------------
# Preprocess methods - Generate
# -------------------------------------------------------------------------
def gen_random_tiles(
image: xp.ndarray, label: xp.ndarray, tile_size: int = 128,
max_patches: Union[int, float] = None, seed: int = 24):
"""
Extract small patches for final dataset
Args:
img (numpy array - c, y, x): imagery data
tile_size (tuple): 2D dimensions of tile
random_state (int): seed for reproducibility (match image and mask)
n_patches (int): number of tiles to extract
"""
# Calculate ntiles based on user input
ntiles = calc_ntiles(
width=image.shape[0], height=image.shape[1],
tile_size=tile_size, max_patches=max_patches)
images_list = [] # list to store data patches
labels_list = [] # list to store label patches
for i in tqdm(range(ntiles)):
# Generate random integers from image
x = random.randint(0, image.shape[0] - tile_size)
y = random.randint(0, image.shape[1] - tile_size)
while image[x: (x + tile_size), y: (y + tile_size), :].min() < 0 \
or label[x: (x + tile_size), y: (y + tile_size)].min() < 0 \
or xp.unique(
label[x: (x + tile_size), y: (y + tile_size)]).shape[0] < 2:
x = random.randint(0, image.shape[0] - tile_size)
y = random.randint(0, image.shape[1] - tile_size)
# Generate img and mask patches
image_tile = image[x:(x + tile_size), y:(y + tile_size)]
label_tile = label[x:(x + tile_size), y:(y + tile_size)]
# Apply some random transformations
random_transformation = xp.random.randint(1, 8)
if random_transformation == 1:
image_tile = xp.fliplr(image_tile)
label_tile = xp.fliplr(label_tile)
if random_transformation == 2:
image_tile = xp.flipud(image_tile)
label_tile = xp.flipud(label_tile)
if random_transformation == 3:
image_tile = xp.rot90(image_tile, 1)
label_tile = xp.rot90(label_tile, 1)
if random_transformation == 4:
image_tile = xp.rot90(image_tile, 2)
label_tile = xp.rot90(label_tile, 2)
if random_transformation == 5:
image_tile = xp.rot90(image_tile, 3)
label_tile = xp.rot90(label_tile, 3)
if random_transformation > 5:
pass
images_list.append(image_tile)
labels_list.append(label_tile)
return xp.asarray(images_list), xp.asarray(labels_list)
def gen_random_tiles_include():
raise NotImplementedError
# -------------------------------------------------------------------------
# Standardizing Methods - Calculate
# -------------------------------------------------------------------------
def standardize_global(image, strategy='per-image') -> xp.array:
"""
Standardize numpy array using global standardization.
:param image: numpy array in the format (n,w,h,c).
:param strategy: can select between per-image or per-batch.
:return: globally standardized numpy array
"""
if strategy == 'per-batch':
mean = xp.mean(image) # global mean of all images
std = xp.std(image) # global std of all images
for i in range(image.shape[0]): # for each image in images
image[i, :, :, :] = (image[i, :, :, :] - mean) / std
return image
elif strategy == 'per-image':
return (image - xp.mean(image)) / xp.std(image)
def standardize_local(image, strategy='per-image') -> xp.array:
"""
Standardize numpy array using global standardization.
:param image: numpy array in the format (n,w,h,c).
:param strategy: can select between per-image or per-batch.
:return: globally standardized numpy array
"""
if strategy == 'per-batch':
mean = xp.mean(image) # global mean of all images
std = xp.std(image) # global std of all images
for i in range(image.shape[0]): # for each image in images
image[i, :, :, :] = (image[i, :, :, :] - mean) / std
return image
elif strategy == 'per-image':
for j in range(image.shape[0]): # for each channel in images
channel_mean = xp.mean(image[j, :, :])
channel_std = xp.std(image[j, :, :])
image[j, :, :] = \
(image[j, :, :] - channel_mean) / channel_std
return image
```
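A short, hedged usage sketch of the tiling and standardization helpers follows. It uses synthetic NumPy data and assumes `xp` resolved to NumPy on a CPU-only host; the output filename is a placeholder.

```python
# Illustrative only: tile a small synthetic scene and standardize the batch.
# Shapes follow the (rows, cols, channels) layout used by gen_random_tiles.
import numpy as np

image = np.random.rand(512, 512, 4).astype(np.float32)   # values >= 0
label = np.random.randint(0, 2, (512, 512))               # binary mask

x_tiles, y_tiles = gen_random_tiles(
    image, label, tile_size=128, max_patches=0.25, seed=24)
print(x_tiles.shape, y_tiles.shape)   # (4, 128, 128, 4) (4, 128, 128)

x_tiles = standardize_global(x_tiles, strategy='per-batch')
means, stds = get_std_mean(x_tiles, 'stats.npz')   # 'stats.npz' is a placeholder
```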
#### File: terragpu/indices/hls_indices.py
```python
import xarray as xr
__all__ = ["add_indices", "cs1", "dvi", "ndvi"]
CHUNKS = {'band': 1, 'x': 2048, 'y': 2048}
def _get_band_locations(raster_bands: list, requested_bands: list):
"""
Get list indices for band locations.
"""
locations = []
for b in requested_bands:
try:
locations.append(raster_bands.index(b.lower()))
except ValueError:
raise ValueError(f'{b} not in raster bands {raster_bands}')
return locations
def cs1(raster):
"""
Cloud detection index (CS1), CS1 := (3. * NIR1) / (Blue + Green + Red)
:param raster: xarray or numpy array object in the form (c, h, w)
    :return: new band with CS1 calculated
"""
nir1, red, blue, green = _get_band_locations(
raster.attrs['band_names'], ['nir1', 'red', 'blue', 'green'])
index = (
(3. * raster['band_data'][nir1, :, :]) /
(raster['band_data'][blue, :, :] + raster['band_data'][green, :, :] \
+ raster['band_data'][red, :, :])
).compute()
return index.expand_dims(dim="band", axis=0).fillna(0).chunk(chunks=CHUNKS)
def cs2(raster):
"""
Cloud detection index (CS2), CS2 := (Blue + Green + Red + NIR1) / 4.
:param raster: xarray or numpy array object in the form (c, h, w)
:return: new band with CS2 calculated
"""
nir1, red, blue, green = _get_band_locations(
raster.attrs['band_names'], ['nir1', 'red', 'blue', 'green'])
index = (
(raster['band_data'][blue, :, :] + raster['band_data'][green, :, :] \
+ raster['band_data'][red, :, :] + raster['band_data'][nir1, :, :])
/ 4.0
).compute()
return index.expand_dims(dim="band", axis=0).fillna(0).chunk(chunks=CHUNKS)
def dvi(raster):
"""
Difference Vegetation Index (DVI), DVI := NIR1 - Red
:param raster: xarray or numpy array object in the form (c, h, w)
:return: new band with DVI calculated
"""
nir1, red = _get_band_locations(
raster.attrs['band_names'], ['nir1', 'red'])
index = (
raster['band_data'][nir1, :, :] - raster['band_data'][red, :, :]
).compute()
return index.expand_dims(dim="band", axis=0).chunk(chunks=CHUNKS)
def dwi(raster):
"""
Difference Water Index (DWI), DWI := factor * (Green - NIR1)
:param raster: xarray or numpy array object in the form (c, h, w)
:return: new band with DWI calculated
"""
nir1, green = _get_band_locations(
raster.attrs['band_names'], ['nir1', 'green'])
index = (
raster['band_data'][green, :, :] - raster['band_data'][nir1, :, :]
).compute()
return index.expand_dims(dim="band", axis=0).chunk(chunks=CHUNKS)
def fdi(raster):
"""
Forest Discrimination Index (FDI), type int16
8 band imagery: FDI := NIR2 - (RedEdge + Blue)
4 band imagery: FDI := NIR1 - (Red + Blue)
    :param raster: xarray or numpy array object in the form (c, h, w)
:return: new band with FDI calculated
"""
bands = ['blue', 'nir2', 'rededge']
    if not all(b in raster.attrs['band_names'] for b in bands):
bands = ['blue', 'nir1', 'red']
blue, nir, red = _get_band_locations(
raster.attrs['band_names'], bands)
index = (
raster['band_data'][nir, :, :] - \
(raster['band_data'][red, :, :] + raster['band_data'][blue, :, :])
).compute()
return index.expand_dims(dim="band", axis=0).chunk(chunks=CHUNKS)
def ndvi(raster):
"""
    Normalized Difference Vegetation Index (NDVI), NDVI := (NIR1 - Red) / (NIR1 + Red)
    :param raster: xarray or numpy array object in the form (c, h, w)
    :return: new band with NDVI calculated
"""
nir1, red = _get_band_locations(
raster.attrs['band_names'], ['nir1', 'red'])
index = (
(raster['band_data'][nir1, :, :] - raster['band_data'][red, :, :]) /
(raster['band_data'][nir1, :, :] + raster['band_data'][red, :, :])
).compute()
return index.expand_dims(dim="band", axis=0).fillna(0).chunk(chunks=CHUNKS)
def ndwi(raster):
"""
Normalized Difference Water Index (NDWI)
NDWI := factor * (Green - NIR1) / (Green + NIR1)
:param raster: xarray or numpy array object in the form (c, h, w)
    :return: new band with NDWI calculated
"""
nir1, green = _get_band_locations(
raster.attrs['band_names'], ['nir1', 'green'])
index = (
(raster['band_data'][green, :, :] - raster['band_data'][nir1, :, :]) /
(raster['band_data'][green, :, :] + raster['band_data'][nir1, :, :])
).compute()
return index.expand_dims(dim="band", axis=0).fillna(0).chunk(chunks=CHUNKS)
def si(raster):
"""
Shadow Index (SI), SI := (Blue * Green * Red) ** (1.0 / 3)
:param raster: xarray or numpy array object in the form (c, h, w)
:return: new band with SI calculated
"""
red, blue, green = _get_band_locations(
raster.attrs['band_names'], ['red', 'blue', 'green'])
    # multiply the three visible bands, matching the docstring formula
    index = (
        (raster['band_data'][blue, :, :] * raster['band_data'][green, :, :] *
         raster['band_data'][red, :, :]) ** (1.0/3.0)
    ).compute()
return index.expand_dims(dim="band", axis=0).fillna(0).chunk(chunks=CHUNKS)
indices_mappings = {
'cs1': cs1,
'cs2': cs2,
'dvi': dvi,
'dwi': dwi,
'fdi': fdi,
'ndvi': ndvi,
'ndwi': ndwi,
'si': si
}
def get_indices(index_key):
try:
return indices_mappings[index_key]
except KeyError:
raise ValueError(f'Invalid indices mapping: {index_key}.')
def add_indices(raster, indices):
"""
:param rastarr: xarray or numpy array object in the form (c, h, w)
:param bands: list with strings of bands in the raster
:param indices: indices to calculate and append to the raster
:param factor: factor used for toa imagery
:return: raster with updated bands list
"""
nbands = len(raster.attrs['band_names']) # get initial number of bands
indices = [b.lower() for b in indices] # lowercase indices list
for index_id in indices: # iterate over each new band
# Counter for number of bands, increase metadata at concat
indices_function = get_indices(index_id)
nbands += 1 # Counter for number of bands
# Calculate band (indices)
new_index = indices_function(raster) # calculate the new index
new_index.coords['band'] = [nbands] # add band indices to raster
new_index = new_index.to_dataset() # move from array to dataset
# Set metadata
raster.attrs['band_names'].append(index_id)
raster = xr.concat([raster, new_index], dim='band')
return raster.chunk(chunks=CHUNKS)
```
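A hypothetical example of feeding a raster to `add_indices` follows. The real loading code lives elsewhere in the project, so the Dataset built here is only a stand-in that carries the `band_data` variable and `band_names` attribute these functions look up; dask is assumed to be installed for the `.chunk` calls.

```python
# Stand-in raster, not from the original repo.
import numpy as np
import xarray as xr

data = xr.DataArray(
    np.random.rand(4, 256, 256).astype('float32'),
    dims=("band", "y", "x"),
    coords={"band": [1, 2, 3, 4]},
    name="band_data",
)
raster = data.to_dataset()
raster.attrs['band_names'] = ['blue', 'green', 'red', 'nir1']

raster = add_indices(raster, indices=['NDVI', 'NDWI'])   # names are lowercased
print(raster.attrs['band_names'])
# ['blue', 'green', 'red', 'nir1', 'ndvi', 'ndwi']
```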
#### File: xrasterlib/terragpu/utils.py
```python
import sys
import os
import logging
import configparser
from datetime import datetime # tracking date
try:
from cuml.dask.common import utils as dask_utils
from dask.distributed import Client
# from dask.distributed import wait
from dask_cuda import LocalCUDACluster
import dask_cudf
HAS_GPU = True
except ImportError:
HAS_GPU = False
# -------------------------------------------------------------------------------
# module utils
# Utilities module with shared helper methods
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Module Methods
# -------------------------------------------------------------------------------
def create_logfile(logdir='results'):
"""
Create log file to output both stderr and stdout.
    :param logdir: log directory to store the log file
    :return: logfile path; stdout and stderr are redirected to the file
"""
logfile = os.path.join(logdir, '{}_log.out'.format(
datetime.now().strftime("%Y%m%d-%H%M%S"))
)
    logging.info('See %s', logfile)
so = se = open(logfile, 'w') # open our log file
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w') # stdout buffering
os.dup2(so.fileno(), sys.stdout.fileno()) # redirect to the log file
os.dup2(se.fileno(), sys.stderr.fileno())
return logfile
def read_config(fname: str):
"""
Read INI format configuration file.
:params fname: filename of configuration file
:return: configparser object with configuration parameters
"""
try: # try initializing config object
config_file = configparser.ConfigParser(
interpolation=configparser.ExtendedInterpolation()
) # initialize config parser
config_file.read(fname) # take first argument from script
except configparser.ParsingError as err: # abort if incorrect format
raise RuntimeError('Could not parse {}.'.format(err))
logging.info('Configuration file read.')
return config_file
def create_dcluster():
# This will use all GPUs on the local host by default
cluster = LocalCUDACluster(threads_per_worker=1)
c = Client(cluster)
# Query the client for all connected workers
workers = c.has_what().keys()
n_workers = len(workers)
n_streams = 8 # Performance optimization
return c, workers, n_workers, n_streams
def distribute_dcluster(X_cudf, y_cudf, n_partitions, c, workers):
# x and y in cudf format
# n_partitions = n_workers
# c and workers come from the create_dcluster function
# First convert to cudf (you would likely load in cuDF format to start)
# X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X))
# y_cudf = cudf.Series(y)
# Partition with Dask
# In this case, each worker will train on 1/n_partitions fraction of data
X_dask = dask_cudf.from_cudf(X_cudf, npartitions=n_partitions)
y_dask = dask_cudf.from_cudf(y_cudf, npartitions=n_partitions)
# Persist to cache the data in active memory
X_dask, y_dask = \
dask_utils.persist_across_workers(c, [X_dask, y_dask], workers=workers)
return X_dask, y_dask
``` |
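A brief, hypothetical sketch of the two CPU-side helpers follows; `results` and `config.ini` are placeholder paths, not paths from the original project.

```python
# Illustrative only: parse an INI config and redirect output to a timestamped log.
import logging
import os

logging.basicConfig(level=logging.INFO)
os.makedirs('results', exist_ok=True)
logfile = create_logfile(logdir='results')   # stdout/stderr now go to this file

config = read_config('config.ini')
for section in config.sections():
    logging.info('Section %s: %s', section, dict(config[section]))
```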
{
"source": "jordancarlson08/MyStuff",
"score": 2
} |
#### File: cached_templates/scripts/user.jsm.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1396763868.373039
_enable_loop = True
_template_filename = 'C:\\Users\\<NAME>\\Desktop\\MyStuff\\account\\scripts/user.jsm'
_template_uri = 'user.jsm'
_source_encoding = 'ascii'
import os, os.path, re
_exports = []
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
user = context.get('user', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer("\n\n//Ajax call to create a modal\n$(function() {\n\n\t$('#password_button').off('click.password').on('click.password', function(){\n\n\t\t$('#password_button').loadmodal({\n\t\t\turl: '/account/user__password/")
# SOURCE LINE 9
__M_writer(str(user.id))
__M_writer("',\n\t\t\tid: 'password_modal',\n\t\t\ttitle: '<h2>Edit Password</h2>',\n\t\t\twidth: '600px',\n\t\t\tajax: {\n\t\t\t\tdataType: 'html',\n\t\t\t\tmethod: 'POST',\n\t\t\t\tsuccess: function(data, status, xhr) {\n\t\t\t\t\tconsole.log($('#password_modal'));\n\t\t\t\t},//\n\t\t\t// any other options from the regular $.ajax call (see JQuery docs)\n\t\t\t\n\t\t\t},\n\t\t});\n\t});\n});\n\n//Ajax call to create a modal\n$(function() {\n\n\t$('#edit_button').off('click.edit').on('click.edit', function(){\n\n\t\t$('#edit_button').loadmodal({\n\t\t\turl: '/account/user__edit/")
# SOURCE LINE 32
__M_writer(str(user.id))
__M_writer("',\n\t\t\tid: 'edit_modal',\n\t\t\ttitle: '<h2>Edit Account Info</h2>',\n\t\t\twidth: '600px',\n\t\t\tajax: {\n\t\t\t\tdataType: 'html',\n\t\t\t\tmethod: 'POST',\n\t\t\t\tsuccess: function(data, status, xhr) {\n\t\t\t\t\tconsole.log($('#edit_modal'));\n\t\t\t\t},//\n\t\t\t// any other options from the regular $.ajax call (see JQuery docs)\n\t\t\t\n\t\t\t},\n\t\t});\n\t});\n});")
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: cached_templates/templates/base_dash.htm.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1396927126.388033
_enable_loop = True
_template_filename = 'C:\\Users\\<NAME>\\Desktop\\MyStuff\\account\\templates/base_dash.htm'
_template_uri = 'base_dash.htm'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['main', 'content', 'adminOptions', 'title', 'left_side']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_template.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
def main():
return render_main(context._locals(__M_locals))
def content():
return render_content(context._locals(__M_locals))
def adminOptions():
return render_adminOptions(context._locals(__M_locals))
def title():
return render_title(context._locals(__M_locals))
def left_side():
return render_left_side(context._locals(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer('\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'main'):
context['self'].main(**pageargs)
return ''
finally:
context.caller_stack._pop_frame()
def render_main(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def main():
return render_main(context)
def content():
return render_content(context)
def adminOptions():
return render_adminOptions(context)
def title():
return render_title(context)
def left_side():
return render_left_side(context)
__M_writer = context.writer()
# SOURCE LINE 4
__M_writer('\n\n <div class="content"> \n <div class="row">\n <div class="col-md-3">\n <div class="container"> \n\n ')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'left_side'):
context['self'].left_side(**pageargs)
# SOURCE LINE 45
__M_writer(' \n\n </div>\n </div> \n </div>\n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def content():
return render_content(context)
__M_writer = context.writer()
# SOURCE LINE 38
__M_writer('\n Site content goes here in sub-templates.\n ')
return ''
finally:
context.caller_stack._pop_frame()
def render_adminOptions(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def adminOptions():
return render_adminOptions(context)
__M_writer = context.writer()
return ''
finally:
context.caller_stack._pop_frame()
def render_title(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def title():
return render_title(context)
__M_writer = context.writer()
return ''
finally:
context.caller_stack._pop_frame()
def render_left_side(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def content():
return render_content(context)
def adminOptions():
return render_adminOptions(context)
def title():
return render_title(context)
def left_side():
return render_left_side(context)
__M_writer = context.writer()
# SOURCE LINE 11
__M_writer('\n ')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'title'):
context['self'].title(**pageargs)
# SOURCE LINE 12
__M_writer('</br>\n <div class="container bs-docs-container">\n <div class="row">\n\n <div class="col-md-3">\n <div class="bs-sidebar hidden-print" role="complementary">\n <ul class="nav bs-sidenav">\n <li>\n <a href="/Profile/">View/Edit Personal Profile</a>\n </li>\n <li>\n <a href="/managestore/">View Store Information</a>\n </li> \n <li>\n <a href="/inventory/">Manage Inventory</a>\n </li> \n <hr>\n <li>\n ')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'adminOptions'):
context['self'].adminOptions(**pageargs)
# SOURCE LINE 30
__M_writer('\n </li> \n </ul>\n </div>\n </div>\n\n <div class="col-md-9" role="main">\n\n ')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
# SOURCE LINE 40
__M_writer(' \n \n </div>\n </div> \n </div>\n ')
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: cached_templates/templates/email_forgot_password.html.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1397169653.805318
_enable_loop = True
_template_filename = 'C:\\Users\\<NAME>\\Desktop\\MyStuff\\account\\templates/email_forgot_password.html'
_template_uri = 'email_forgot_password.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = []
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
url = context.get('url', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer('<!DOCTYPE html>\r\n<html>\r\n<body style="margin:25px 50px;">\r\n\t<table>\r\n\t\t<tbody>\r\n\t\t\t<tr>\r\n\t\t\t\t<td><img alt="My Stuff" height="75" width="75" src="http://digitallifemyway.com/static/homepage/images/camera_icon_xs.png" /></td>\r\n\t\t\t\t<td> </td>\r\n\t\t\t\t<td><h1 style="font-family:sans-serif;">My Stuff - Digital Life My Way</h1></td>\r\n\t\t\t</tr>\r\n\t\t</tbody>\r\n\t</table>\r\n\r\n\t<br/>\r\n\t<h2 style="font-family:sans-serif;">Forgot Password</h2>\r\n\r\n\t<p \tstyle=\'font-family: sans-serif;\'>We recieved a request to reset your password.<br/> \r\n\tYou can use the following link to reset it.<br/><br/>\r\n\t<a href="')
# SOURCE LINE 19
__M_writer(str(url))
__M_writer('" \r\n\tstyle=\r\n\t\'\r\n\tfont-family: sans-serif;\r\n\tcolor:#fff;\r\n\tbackground-color:#5cb85c;\r\n\tborder-color:#4cae4c;\r\n\ttext-decoration:none;\r\n\tdisplay: inline-block;\r\n\tpadding: 6px 12px;\r\n\tmargin-bottom: 0;\r\n\tfont-size: 14px;\r\n\tfont-weight: normal;\r\n\tline-height: 1.42857143;\r\n\ttext-align: center;\r\n\twhite-space: nowrap;\r\n\tvertical-align: middle;\r\n\tcursor: pointer;\r\n\t-webkit-user-select: none;\r\n\t -moz-user-select: none;\r\n\t -ms-user-select: none;\r\n\t user-select: none;\r\n\tbackground-image: none;\r\n\tborder: 1px solid transparent;\r\n\tborder-radius: 4px;\r\n\t\'\r\n\t>Reset Password</a><br/><br/>\r\n\tFor security purposes, this link will only be active for 3 hours.</p>\r\n\t<br/>\r\n\t<br/>\r\n\tThank You!\r\n\r\n</body>\r\n</html>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: cached_templates/templates/repairstatus.html.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1396892629.436706
_enable_loop = True
_template_filename = 'C:\\Users\\<NAME>\\Desktop\\MyStuff\\account\\templates/repairstatus.html'
_template_uri = 'repairstatus.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
def content():
return render_content(context._locals(__M_locals))
repairs = context.get('repairs', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer('\r\n\r\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
# SOURCE LINE 47
__M_writer(' \r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def content():
return render_content(context)
repairs = context.get('repairs', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 4
__M_writer('\r\n')
# SOURCE LINE 5
__M_writer('\r\n\r\n <h2>Repair Status</h2><hr/><br/>\r\n\r\n')
# SOURCE LINE 9
if repairs:
# SOURCE LINE 10
__M_writer(' <table class="table table-hover">\r\n \t<thead>\r\n \t\t<tr>\r\n \t\t\t<th>ID</th>\r\n \t\t\t<th>Item</th>\r\n <th>Date Received</th>\r\n \t\t\t<th>Est. Completion</th>\r\n \t\t\t<th>Status</th>\r\n\r\n \t\t</tr>\r\n \t</thead>\r\n \t<tbody>\r\n')
# SOURCE LINE 22
for s in repairs:
# SOURCE LINE 23
__M_writer(' <tr>\r\n <td>')
# SOURCE LINE 24
__M_writer(str(s.id))
__M_writer('</td>\r\n <td>')
# SOURCE LINE 25
__M_writer(str(s.itemName))
__M_writer('</td>\r\n <td>')
# SOURCE LINE 26
__M_writer(str(s.dateStart))
__M_writer('</td>\r\n <td>')
# SOURCE LINE 27
__M_writer(str(s.dateComplete))
__M_writer('</td>\r\n <td>')
# SOURCE LINE 28
__M_writer(str(s.status))
__M_writer('</td>\r\n </tr>\r\n')
# SOURCE LINE 31
__M_writer(' \t</tbody>\r\n</table>\r\n\r\n')
# SOURCE LINE 34
else:
# SOURCE LINE 35
__M_writer(' <p>It looks like you don\'t have any active repairs at this time. If you have any questions contact our repairs department at 1-800-555-5555</p>\r\n <div class="vertical_spacer6"></div>\r\n <div class="vertical_spacer6"></div>\r\n')
# SOURCE LINE 39
__M_writer('\r\n\r\n <div class="vertical_spacer6"></div>\r\n <div class="vertical_spacer6"></div>\r\n\r\n\r\n\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: account/views/forgotpassword.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from account.models import *
from . import templater
from uuid import *
from datetime import *
from django.core.mail import EmailMultiAlternatives, EmailMessage, send_mail
def process_request(request):
    '''Forgot Password Page'''
form = ForgotForm()
if request.method == 'POST':
form = ForgotForm(request.POST)
if form.is_valid():
u = User.objects.get(username=form.cleaned_data['username'])
hours3 = datetime.now() + timedelta(hours=3)
# Generate Code and Expiration
u.passwordResetCode = uuid4()
u.passwordResetExp = hours3
u.save()
url = 'http://localhost:8000/account/resetpassword/'+str(u.passwordResetCode)
#HTML/TXT Email
email = u.email
tvars = {'url':url}
html_content = templater.render(request, 'email_forgot_password.html', tvars)
subject, from_email= 'Reset Your Password', '<EMAIL>'
text_content = 'Please use this link to reset your password %s, for security purposes this link will only be valid for the next 3 hours.' %(url)
msg = EmailMultiAlternatives(subject, text_content, from_email, [email])
msg.attach_alternative(html_content, "text/html")
msg.send()
#Display confirmation page
isSent=True
tvars = {'isSent':isSent}
return templater.render_to_response(request, 'logout.html', tvars)
tvars = {
'form':form,
}
return templater.render_to_response(request, 'forgotpassword.html', tvars)
class ForgotForm(forms.Form):
'''Forgot Password Form'''
username = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control', 'placeholder':'Username'}))
def clean(self):
try:
user = User.objects.get(username=self.cleaned_data['username'])
except:
user = None
if user == None:
raise forms.ValidationError("That user doesn't exist in our system")
return self.cleaned_data
```
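The view above stores a `uuid4` reset code with a three-hour expiry. The following stand-alone snippet illustrates that lifetime check; it is a sketch of what a reset view would do, not code taken from the project.

```python
# Sketch of the reset-code lifetime used above.
from uuid import uuid4
from datetime import datetime, timedelta

reset_code = uuid4()
reset_expires = datetime.now() + timedelta(hours=3)

def code_is_valid(submitted_code):
    """A reset view would perform a check along these lines."""
    return str(submitted_code) == str(reset_code) and datetime.now() <= reset_expires

print(code_is_valid(reset_code))    # True within the 3-hour window
```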
#### File: account/views/newuser.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from account.models import *
from account.views.login import *
from django.contrib.auth import authenticate, login
from . import templater
from django.core.mail import EmailMultiAlternatives, EmailMessage, send_mail
def process_request(request):
'''New User Page'''
u = User()
form = UserForm()
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
#time to save the data
u.username = form.cleaned_data['username']
u.set_password(form.cleaned_data['password'])
u.first_name = form.cleaned_data['first_name']
u.last_name = form.cleaned_data['last_name']
u.email = form.cleaned_data['email']
u.phone = form.cleaned_data['phone']
# u.security_question = form.cleaned_data['security_question']
# u.security_answer = form.cleaned_data['security_answer']
u.street1 = form.cleaned_data['street1']
u.street2 = form.cleaned_data['street2']
u.city = form.cleaned_data['city']
u.state = form.cleaned_data['state']
u.zipCode = form.cleaned_data['zipCode']
u.is_staff = False
u.save()
#Welcome Email
#HTML/TXT Email
email = u.email
tvars = {}
html_content = templater.render(request, 'email_welcome.html', tvars)
subject, from_email= 'Welcome to Digital Life My Way', '<EMAIL>'
text_content = 'Welcome to Digital Life My Way'
msg = EmailMultiAlternatives(subject, text_content, from_email, [email])
msg.attach_alternative(html_content, "text/html")
msg.send()
user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password'])
if user is not None:
if user.is_active:
login(request, user)
#converts session to database -- see below
add_history_to_database(request, user)
request.session.set_expiry(0)
# Redirect to a success page.
return HttpResponse('<script> window.location.href="/index/";</script>')
else:
# Return a 'disabled account' error message
return HttpResponseRedirect('/manager/searchinventory/')
else:
# Return an 'invalid login' error message.
return HttpResponseRedirect('/manager/searchstores/')
tvars = {
'form':form,
}
return templater.render_to_response(request, 'newuser.html', tvars)
class UserForm(forms.Form):
username = forms.CharField(max_length=25, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username',}))
password = forms.CharField(max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
first_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'First Name',}))
last_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Last Name',}))
email = forms.CharField(max_length=50, widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': '<EMAIL>',}))
phone = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '801-555-1234',}))
# security_question = forms.CharField(label='Security Question', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'What is your mother\'s maiden name?',}))
# security_answer = forms.CharField(label='Answer', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Smith',}))
street1 = forms.CharField(label = "Street 1", widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '123 Center St.',}))
street2 = forms.CharField(label = "Street 2", required = False, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '#242',}))
city = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Provo',}))
state = forms.CharField(max_length=2, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'UT',}))
zipCode = forms.IntegerField( widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '84601',}))
def clean_username(self):
username = self.cleaned_data.get('username')
if username and User.objects.filter(username=username).count() > 0:
raise forms.ValidationError('This username is already registered.')
return username
```
#### File: MyStuff/base_app/user_util.py
```python
from django.http import HttpResponseRedirect
from account.models import *
from manager.models import *
from catalog.models import *
#custom decorators
def manager_check(user):
return user.is_staff == True
def employee_check(user):
try:
e = Employee.objects.get(user=user)
employee = True
except:
employee = False
return employee
def user_check(user):
return user.id
def my_account(function):
'''Decorator that restricts access to the account page to the owner of that page and all managers'''
def wrapper(request, *args, **kw):
user=request.user
isEmployee = employee_check(user)
userid = user.id
url=request.urlparams[0]
if user.id == int(url) or user.is_staff==True:
return function(request, *args, **kw)
else:
if isEmployee==True:
return HttpResponseRedirect('/manager/employee/'+str(userid))
else:
return HttpResponseRedirect('/account/user/'+str(userid))
return wrapper
def get_users_only():
    '''Returns a list of users without any employees'''
employee_list = Employee.objects.filter(user__is_active='True').order_by('user__first_name').exclude(user__id=99997)
emp_id = []
for e in employee_list:
emp_id.append(e.user.id)
emp_id.append(99997)
emp_id.append(99998)
user_list = User.objects.filter(is_active=True).exclude(id__in=emp_id)
return user_list
def get_managers():
'''Returns a list of Managers'''
emps = Employee.objects.filter(user__is_staff='True').exclude(id=99999)
return emps
def get_employees():
    '''Returns a list of employees without managers'''
emps = Employee.objects.exclude(user__is_staff='True').exclude(id=99998)
return emps
```
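For reference, here is a hedged sketch of how these helpers are meant to stack on a view function. The view names are illustrative only; the import path matches the `from base_app.user_util import *` usage seen in the views below, and `request.urlparams` comes from the project's DMP-style routing.

```python
# Illustrative only: combining the access-control helpers on view functions.
from django.contrib.auth.decorators import login_required, user_passes_test
from base_app.user_util import manager_check, my_account

@login_required
@user_passes_test(manager_check)        # staff-only page
def process_request(request):
    ...

@login_required
@my_account                             # owner of /account/user/<id> or any manager
def process_request__detail(request):
    ...
```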
#### File: catalog/views/search.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from . import templater
from django.contrib.auth.decorators import login_required
def process_request(request):
'''Shows the catalog items'''
catItems = hmod.CatalogItem.objects.all()
brands = hmod.CatalogItem.objects.distinct('manufacturer')
tvars = {
'brands':brands,
'catItems':catItems,
}
return templater.render_to_response(request, 'search.html', tvars)
def process_request__results(request):
#Determines if the filter is for ALL or a specific brand
if request.urlparams[0] != 'All':
catItems = hmod.CatalogItem.objects.filter(manufacturer=request.urlparams[0])
elif request.urlparams[0] == 'All':
catItems = hmod.CatalogItem.objects.all()
tvars = {
'catItems':catItems,
}
return templater.render_to_response(request, 'search_results.html', tvars)
```
#### File: cached_templates/templates/dash.html.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1397084402.354378
_enable_loop = True
_template_filename = 'C:\\Users\\<NAME>\\Desktop\\MyStuff\\manager\\templates/dash.html'
_template_uri = 'dash.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
request = context.get('request', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer('<!--## This is the base page for both the dashboards. Sprouting off of this one will be a manager and an admin page with minute\r\n')
# SOURCE LINE 3
__M_writer('\r\n')
# SOURCE LINE 6
__M_writer(' \r\n')
# SOURCE LINE 8
__M_writer(' ')
__M_writer('\r\n\r\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
# SOURCE LINE 23
__M_writer('<!--ends content-->\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
request = context.get('request', UNDEFINED)
def content():
return render_content(context)
__M_writer = context.writer()
# SOURCE LINE 10
__M_writer(' ')
__M_writer('\r\n <h2>Welcome back, ')
# SOURCE LINE 11
__M_writer(str(request.user.first_name))
__M_writer(' ')
__M_writer(str(request.user.last_name))
__M_writer("!</h2></br>\r\n <p>Use the left-side navigation bar to view connecting pages and options by clicking on each section heading. To view your own account information, log out, or return to your dashboard, use the dropdown menu in the upper right hand corner.</p>\r\n\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n <div class='vertical_spacer6'></div>\r\n\r\n")
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: cached_templates/templates/newcatalogitem.html.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1397225842.854062
_enable_loop = True
_template_filename = '/Users/ecookson/Desktop/MyStuff/manager/templates/newcatalogitem.html'
_template_uri = 'newcatalogitem.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
form = context.get('form', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer('\n')
# SOURCE LINE 3
__M_writer('\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
# SOURCE LINE 39
__M_writer(' ')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
form = context.get('form', UNDEFINED)
def content():
return render_content(context)
__M_writer = context.writer()
# SOURCE LINE 5
__M_writer('\n')
# SOURCE LINE 6
__M_writer('\n\n<h2>New Catalog Item</h2><hr/>\n</br>\n\n<!-- Custom Form -->\n<form class ="form-horizontal" role="form" method ="POST" enctype="multipart/form-data">\n\n<!-- Loop through the fields of the form -->\n')
# SOURCE LINE 15
for f in form:
# SOURCE LINE 16
__M_writer('\n <div class="form-group">\n <label class="col-sm-3 control-label" for="id_')
# SOURCE LINE 18
__M_writer(str( f.name ))
__M_writer('">')
__M_writer(str( f.label ))
__M_writer('</label> <!-- the label -->\n <div class="col-sm-6">\n ')
# SOURCE LINE 20
__M_writer(str(f))
__M_writer(' <!-- The input box -->\n ')
# SOURCE LINE 21
__M_writer(str(f.errors))
__M_writer('\n </div>\n </div>\n\n')
# SOURCE LINE 26
__M_writer('\n <div class="form-group">\n <div class="col-sm-offset-3 col-sm-6">\n <input class="btn btn-success" type="submit" value="Save">\n </div>\n </div>\n</form>\n\n\n\n\n\n\n')
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: cached_templates/templates/newserializeditem.html.py
```python
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1397084405.494103
_enable_loop = True
_template_filename = 'C:\\Users\\<NAME>\\Desktop\\MyStuff\\manager\\templates/newserializeditem.html'
_template_uri = 'newserializeditem.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
form = context.get('form', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer(' ')
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
# SOURCE LINE 33
__M_writer(' ')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
form = context.get('form', UNDEFINED)
def content():
return render_content(context)
__M_writer = context.writer()
# SOURCE LINE 3
__M_writer('\r\n')
# SOURCE LINE 4
__M_writer('\r\n\r\n <h2>New Serialized Item</h2><hr/>\r\n\t</br>\r\n\r\n <form class ="form-horizontal" role="form" method ="POST">\r\n\r\n')
# SOURCE LINE 11
for f in form:
# SOURCE LINE 12
__M_writer('\r\n <div class="form-group">\r\n <label class="col-sm-3 control-label" for="id_')
# SOURCE LINE 14
__M_writer(str( f.name ))
__M_writer('">')
__M_writer(str( f.label ))
__M_writer('</label>\r\n <div class="col-sm-6">\r\n ')
# SOURCE LINE 16
__M_writer(str(f))
__M_writer(' \r\n ')
# SOURCE LINE 17
__M_writer(str(f.errors))
__M_writer('\r\n </div>\r\n </div>\r\n\r\n')
# SOURCE LINE 22
__M_writer('\r\n <div class="form-group">\r\n <div class="col-sm-offset-3 col-sm-6">\r\n <input class="btn btn-success" type="submit" value="Save">\r\n </div>\r\n </div>\r\n </form>\r\n\r\n<div class=\'vertical_spacer6\'></div>\r\n\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
```
#### File: manager/views/commissions.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager.models import *
from account.models import *
from catalog.models import *
from . import templater
from datetime import *
from django.core.mail import send_mail
from base_app.user_util import *
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@user_passes_test(manager_check)
def process_request(request):
'''Shows the commissions for a user'''
form = CommissionsForm()
c_list = Commission.objects.all()
tvars = {
'form':form,
}
return templater.render_to_response(request, 'commissions.html', tvars)
@login_required
@user_passes_test(manager_check)
def process_request__get(request):
'''Shows the commissions for a user'''
c_list = Commission.objects.filter(transaction__employee__id=request.urlparams[0])
tvars = {
'c_list':c_list,
}
return templater.render_to_response(request, 'commissions_list.html', tvars)
class CommissionsForm(forms.Form):
'''A form for new stores'''
emp = forms.ModelChoiceField(label='Employee' ,queryset=get_employees(), widget=forms.Select(attrs={'class': 'form-control', 'id':'emp'}))
```
#### File: manager/views/dash.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from account import models as amod
from . import templater
from base_app.user_util import *
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@user_passes_test(employee_check)
def process_request(request):
'''Shows the dashboard'''
tvars = {
}
return templater.render_to_response(request, 'dash.html', tvars)
```
#### File: manager/views/fees.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager.models import *
from catalog.models import *
from . import templater
from datetime import *
from decimal import *
from base_app.user_util import *
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@user_passes_test(employee_check)
def process_request(request):
'''Records Damage and Late Fees'''
skip = []
if (request.urlparams[1] == 'late'):
r = Rental.objects.get(id=request.urlparams[0])
daysLate = date.today() - r.dateDue
daysLate = daysLate.days
r_items = RentalItem.objects.filter(rental=r)
rental_fee_list=[]
for i in r_items:
rental_fee_list.append(i.item.lateFee * daysLate)
sum_fees = sum(rental_fee_list)
form = FeeForm(initial={
'lateFee': sum_fees,
})
else:
skip = ['Late Fee', 'Waive Late Fee']
form = FeeForm(initial={
'waiveLateFee': True,
})
if request.method == 'POST':
form = FeeForm(request.POST)
if form.is_valid():
l = ''
d = ''
r = request.urlparams[0]
if (request.urlparams[1] == 'late'):
lf = Late()
lf.daysLate = daysLate
lf.waived = form.cleaned_data['waiveLateFee']
if (lf.waived == True):
lf.amount = 0
else:
lf.amount = form.cleaned_data['lateFee']
lf.save()
l = lf.id
df = Damage()
df.description = form.cleaned_data['description']
df.waived = form.cleaned_data['waiveDamageFee']
if (df.waived == True):
df.amount = Decimal(0.01)
else:
df.amount = form.cleaned_data['damageFee']
df.amount += Decimal(0.01)
df.save()
d = df.id
params = '%s/%s/%s' %(r, d, l)
url = '/catalog/checkout/' +str(params)
return HttpResponseRedirect(url)
tvars = {
'form':form,
'skip':skip,
}
return templater.render_to_response(request, 'fees.html', tvars)
class FeeForm(forms.Form):
'''The form used for damage and late fees'''
lateFee = forms.DecimalField(required=False, label='Late Fee', max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Late Fee',}))
waiveLateFee = forms.BooleanField(required=False, label='Waive Late Fee')
damageFee = forms.DecimalField(required=False, label='Damage Fee', max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Damage Fee',}))
waiveDamageFee = forms.BooleanField(required=False, label='Waive Damage Fee')
description = forms.CharField(required=False, label='Damage Description', widget=forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'Condition Details',}))
def clean(self):
cleaned_data = super(FeeForm, self).clean()
waiveDamageFee = cleaned_data.get("waiveDamageFee")
damageFee = cleaned_data.get("damageFee")
print(damageFee)
if waiveDamageFee == False and damageFee == None:
raise forms.ValidationError('Please enter a damage fee.')
return cleaned_data
```
#### File: manager/views/inventory.py
```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from . import templater
from manager.views.newcatalogitem import CatalogItemForm
from manager.views.newserializeditem import SerializedItemForm
import datetime
from django.core.mail import send_mail
from base_app.user_util import *
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@user_passes_test(manager_check)
def process_request(request):
'''Shows a catalog item and its associated serialized items'''
#Display Function
item = hmod.CatalogItem.objects.get(id=request.urlparams[0])
#This should get the serialized items that are tied to the catalog item
serial = hmod.SerializedItem.objects.filter(catalogItem=item.id).filter(isActive=True).exclude(isRental=True).exclude(isSold=True)
c = hmod.CatalogItem.objects.get(id=request.urlparams[0])
conditions_list = hmod.Condition.objects.all()
stores_list = hmod.Store.objects.filter(isActive="TRUE").order_by('locationName')
rentals = hmod.SerializedItem.objects.filter(catalogItem=item.id).filter(isRental=True)
form = CatalogItemForm(initial={
'name': c.name,
'manufacturer': c.manufacturer,
'listPrice': c.listPrice,
'cost': c.cost,
'commissionRate': c.commissionRate,
'description': c.description,
'techSpecs': c.techSpecs,
'sku': c.sku,
'fillPoint': c.fillPoint,
'leadTime': c.leadTime,
'category':c.category,
'isSerial':c.isSerial,
'img':c.img,
})
if request.method == 'POST':
form = CatalogItemForm(request.POST)
if form.is_valid():
#time to save the data
c.name = form.cleaned_data['name']
c.manufacturer = form.cleaned_data['manufacturer']
c.listPrice = form.cleaned_data['listPrice']
c.cost = form.cleaned_data['cost']
c.commissionRate = form.cleaned_data['commissionRate']
c.description = form.cleaned_data['description']
c.techSpecs = form.cleaned_data['techSpecs']
c.sku = form.cleaned_data['sku']
c.fillPoint = form.cleaned_data['fillPoint']
c.leadTime = form.cleaned_data['leadTime']
c.category = form.cleaned_data['category']
c.isSerial = form.cleaned_data['isSerial']
img = request.FILES.get('img', None)
c.img = '/static/catalog/images/products/'+str(img)
c.save()
return HttpResponseRedirect('/manager/inventory/' + str(request.urlparams[0]))
tvars = {
'item':item,
'form':form,
'serial':serial,
'conditions_list':conditions_list,
'stores_list': stores_list,
'rentals':rentals,
}
return templater.render_to_response(request, 'inventory.html', tvars)
def process_request__delete(request):
#Delete Function
c = hmod.CatalogItem.objects.get(id=request.urlparams[0])
c.isActive = False
c.save()
return HttpResponseRedirect('/manager/searchinventory/')
``` |
{
"source": "jordanchou/curtinideas",
"score": 2
} |
#### File: website/accounts/views.py
```python
from django.contrib.auth.forms import PasswordResetForm
from django.shortcuts import redirect, render, get_object_or_404
from django.views.generic import CreateView, DetailView
from django.http import HttpResponse
from django.template import Context, loader
from django.utils import timezone
from .forms import RegistrationForm
from .models import CustomUser
class RegistrationView(CreateView):
form_class = RegistrationForm
model = CustomUser
def form_valid(self, form):
obj = form.save(commit=False)
obj.slug = obj.email
obj.save()
#reset_form = PasswordResetForm(self.request.POST)
# reset_form.is_valid()
# Copied from django/contrib/auth/views.py : password_reset
# opts = {
# 'use_https': self.request.is_secure(),
# 'email_template_name': 'accounts/verification.html',
# 'subject_template_name' : 'accounts/verification_subject.html',
# 'request': self.request,
# }
# This form sends the email on save()
# reset_form.save(**opts)
template = loader.get_template('accounts/registration_done.html')
return HttpResponse(template.render())
class AccountDetailView(DetailView):
model = CustomUser
def get_context_data(self, **kwargs):
context = super(AccountDetailView, self).get_context_data(**kwargs)
context['now'] = timezone.now()
return context
def account_detail(self, request, slug):
account = get_object_or_404(CustomUser, slug=slug)
return render(request, 'accounts/customuser_detail.html', {'accounts': account})
class AccountUpdateView(DetailView):
model = CustomUser
fields = ['First Name', 'Last Name', 'ID']
template_name_suffix = '_update_form'
def get(self, request, slug, **kwargs):
account = get_object_or_404(CustomUser, slug=slug)
return render(request, 'accounts/customuser_update_form.html', {'accounts': account})
def get_object(self, queryset=None):
return self.request.user
```
#### File: website/submission/views.py
```python
from .models import Submission, Comment, SubVoting, ComVoting
from accounts.models import CustomUser
from .forms import SubmissionForm, CommentForm
from django.utils import timezone
from django.shortcuts import render, get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from . import searchfunctions
#-----------------------------------------------------------------------------
def submission_detail(request, pk):
submission = get_object_or_404(Submission, pk=pk)
submission.increase_view()
submission.save()
return render(request, 'submission/submission_detail.html', {'submission': submission})
#-----------------------------------------------------------------------------
def submission_list(request):
submissions = Submission.objects.filter(
published_date__lte=timezone.now()).order_by('-published_date')
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
def submission_list_upvotes(request):
submissions = Submission.objects.order_by('-upvotes')
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
def submission_list_downvotes(request):
submissions = Submission.objects.order_by('-downvotes')
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
def submission_list_num_views(request):
submissions = Submission.objects.order_by('-num_views')
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
def submission_list_author(request):
submissions = Submission.objects.order_by('-author')
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
#
def submission_list_score(request):
submissions = list(Submission.objects.all())
submissions = sorted(
submissions, key=lambda s: s.get_score(), reverse=True)
return render(request, 'submission/submission_list.html', {'submissions': submissions})
def submission_list_self(request, slug):
submissions = Submission.objects.filter(author__email=slug)
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
def submission_create(request):
if request.method == "POST":
form = SubmissionForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
post.author.add_points(4)
return submission_list(request)
else:
form = SubmissionForm()
return render(request, 'submission/submission_create.html', {'form': form})
#-----------------------------------------------------------------------------
def update_sub_upvotes(request, slug, pk):
submission = get_object_or_404(Submission, pk=pk)
voter = get_object_or_404(CustomUser, slug=slug)
downvote = submission.get_downvotes()
upvote = submission.get_upvotes()
vote = SubVoting.objects.filter(voter=voter, submission=submission)
# If vote exists, remove vote from submission, remove all related points
# Remove vote tuple from database. user can now re-vote if required
if (vote.exists() == True):
if ( vote[0].upvote == True ):
Submission.objects.filter(pk=submission.pk).update(upvotes=upvote - 1)
else:
Submission.objects.filter(pk=submission.pk).update(downvotes=downvote - 1)
voter.add_points(-1)
submission.author.add_points(-1)
vote.delete()
# User hasn't vote on this submission, so let them vote
else:
new_vote = SubVoting()
new_vote.create_sub_up_vote(submission, voter)
submission_vote = Submission.objects.filter(pk=submission.pk).update(upvotes=upvote + 1)
voter.add_points(1)
submission.author.add_points(1)
return submission_list(request)
#-----------------------------------------------------------------------------
def update_sub_downvotes(request, slug, pk):
submission = get_object_or_404(Submission, pk=pk)
voter = get_object_or_404(CustomUser, slug=slug)
downvote = submission.get_downvotes()
upvote = submission.get_upvotes()
vote = SubVoting.objects.filter(voter=voter, submission=submission)
# If vote exists, remove vote from submission, remove all related points
# Remove vote tuple from database. user can now re-vote if required
if (vote.exists() == True):
if ( vote[0].upvote == True ):
Submission.objects.filter(pk=submission.pk).update(upvotes=upvote - 1)
else:
Submission.objects.filter(pk=submission.pk).update(downvotes=downvote - 1)
voter.add_points(-1)
submission.author.add_points(-1)
vote.delete()
else:
new_vote = SubVoting()
new_vote.create_sub_down_vote(submission, voter)
submission_vote = Submission.objects.filter(pk=submission.pk).update(downvotes=downvote + 1)
voter.add_points(1)
return submission_list(request)
#-----------------------------------------------------------------------------
def update_com_upvotes(request, slug, pk):
comment = get_object_or_404(Comment, pk=pk)
voter = get_object_or_404(CustomUser, slug=slug)
downvote = comment.get_downvotes()
upvote = comment.get_upvotes()
vote = ComVoting.objects.filter(voter=voter, comment=comment)
if (vote.exists() == True):
if ( vote[0].upvote == True ):
Comment.objects.filter(pk=comment.pk).update(upvotes=upvote - 1)
else:
Comment.objects.filter(pk=comment.pk).update(downvotes=downvote - 1)
voter.add_points(-1)
if(comment.is_improvement):
comment.author.add_points(-1)
vote.delete()
else:
new_vote = ComVoting()
new_vote.create_com_up_vote(comment, voter)
upvote = comment.get_upvotes()
comment_vote = Comment.objects.filter(pk=comment.pk).update(upvotes=upvote + 1)
voter.add_points(1)
if(comment.is_improvement):
comment.author.add_points(1)
submission = comment.submission
return render(request, 'submission/submission_detail.html', {'submission': submission})
#-----------------------------------------------------------------------------
def update_com_downvotes(request, slug, pk):
comment = get_object_or_404(Comment, pk=pk)
voter = get_object_or_404(CustomUser, slug=slug)
downvote = comment.get_downvotes()
upvote = comment.get_upvotes()
vote = ComVoting.objects.filter(voter=voter, comment=comment)
if (vote.exists() == True):
if ( vote[0].upvote == True ):
Comment.objects.filter(pk=comment.pk).update(upvotes=upvote - 1)
else:
Comment.objects.filter(pk=comment.pk).update(downvotes=downvote - 1)
voter.add_points(-1)
vote.delete()
else:
new_vote = ComVoting()
new_vote.create_com_down_vote(comment, voter)
comment_vote = Comment.objects.filter(pk=comment.pk).update(downvotes=downvote + 1)
voter.add_points(1)
submission = comment.submission
return render(request, 'submission/submission_detail.html', {'submission': submission})
#-----------------------------------------------------------------------------
def submission_delete(request, pk):
submission = get_object_or_404(Submission, pk=pk)
submission.author.add_points(-4)
submission.delete()
submissions = Submission.objects.filter(
published_date__lte=timezone.now()).order_by('-published_date')
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
def submission_edit(request, pk):
submission = get_object_or_404(Submission, pk=pk)
if request.method == "POST":
form = SubmissionForm(request.POST, instance=submission)
if form.is_valid():
submission = form.save(commit=False)
submission.author = request.user
submission.published_date = timezone.now()
submission.save()
return redirect('/submission/', pk=submission.pk)
else:
form = SubmissionForm(instance=submission)
return render(request, 'submission/submission_create.html', {'form': form})
#-----------------------------------------------------------------------------
def comment_on_submission(request, slug, pk):
submission = get_object_or_404(Submission, pk=pk)
author = get_object_or_404(CustomUser, slug=slug)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.submission = submission
comment.author = author
comment.save()
if comment.is_improvement:
author.add_points(3)
else:
author.add_points(2)
return render(request, 'submission/submission_detail.html', {'submission': submission})
else:
form = CommentForm()
return render(request, 'submission/comment_on_submission.html', {'form': form})
#-----------------------------------------------------------------------------
def comment_edit(request, pk):
comment = get_object_or_404(Comment, pk=pk)
submission = get_object_or_404(Submission, pk=comment.submission.pk)
if request.method == "POST":
form = CommentForm(request.POST, instance=comment)
if form.is_valid():
comment = form.save(commit=False)
comment.submission = submission
comment.author = request.user
comment.save()
return render(request, 'submission/submission_detail.html', {'submission': submission})
else:
form = CommentForm(instance=comment)
return render(request, 'submission/comment_on_submission.html', {'form': form})
#-----------------------------------------------------------------------------
def comment_delete(request, pk):
comment = get_object_or_404(Comment, pk=pk)
submission = comment.submission.pk
comment.delete()
submission = get_object_or_404(Submission, pk=submission)
return render(request, 'submission/submission_detail.html', {'submission': submission})
def submission_list_science(request):
submissions = Submission.objects.filter(category="Science")
return render(request, 'submission/submission_list.html', {'submissions': submissions})
def submission_list_engineering(request):
submissions = Submission.objects.filter(category="Engineering")
return render(request, 'submission/submission_list.html', {'submissions': submissions})
def submission_list_health_sciences(request):
submissions = Submission.objects.filter(category="Health Sciences")
return render(request, 'submission/submission_list.html', {'submissions': submissions})
def submission_list_humanities(request):
submissions = Submission.objects.filter(category="Humanities")
return render(request, 'submission/submission_list.html', {'submissions': submissions})
def submission_list_arts(request):
submissions = Submission.objects.filter(category="Arts")
return render(request, 'submission/submission_list.html', {'submissions': submissions})
#-----------------------------------------------------------------------------
# search
def search(request):
query_string = ''
submissions = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = searchfunctions.get_query(
query_string, ['title', 'text'])
submissions = Submission.objects.filter(
entry_query).order_by('-published_date')
return render_to_response('submission/submission_list.html',
{'submissions': submissions},
context_instance=RequestContext(request))
#-----------------------------------------------------------------------------
``` |
{
"source": "jordanch/rupsy-downloader",
"score": 3
} |
#### File: jordanch/rupsy-downloader/program.py
```python
import os
import re
import datetime
import requests
import shutil
from bs4 import BeautifulSoup
def main():
print_header()
releases = get_user_selection_and_links()
print(releases)
download_selection(releases)
def print_header():
print('----------------------------------------')
print('--------RUPSY DOWNLOAD PROGRAM----------')
print('----------------------------------------')
def get_user_selection_and_links():
# download music based on artist selection
# TODO: add validation on input
artist_selection = input('What artist would you like releases from?\n')
# scrape site and get URLs for releases for artist
url = 'http://www.rupsy.ru/index.php'
host = 'www.rupsy.ru'
html = requests.get(url + '?id=4&search={0}'.format(artist_selection))
soup = BeautifulSoup(html.text, 'html.parser')
releases = soup.find_all('td', class_='rel')
release_list = []
for release in releases:
release_list.append((release.find('div', class_='rel_name').find('a').text,
host + release.find('div', style='text-align:center;').find('a')['href']))
return release_list
def download_selection(releases):
download_path = os.path.join(os.path.abspath(os.path.curdir), 'rupsy-downloads')
# check for folder and create rupsy-download folder if necessary
if not (os.path.isdir(download_path)):
os.makedirs(download_path)
# download releases
for release in releases:
# get download filename
rh = requests.head('http://' + release[1])
release_download_filename = release[0].replace(' ', '') + '.{0}'\
.format(rh.headers['Location'].split('.')[-1]).lower()
# create file if one doesn't exist
if not (os.path.isfile(os.path.join(download_path, release_download_filename))):
dir_fd = os.open(download_path, os.O_RDONLY)
def opener(path, flags):
return os.open(path, flags, dir_fd=dir_fd)
r = requests.get('http://' + release[1], stream=True)
print('Starting release download')
with open(release_download_filename, 'wb', opener=opener) as fd:
c = 0
for chunk in r.iter_content(1024):
if c % 1048576 == 0:
print('Downloaded 1MB of {0}...'.format(release[0]))
fd.write(chunk)
c += 1024
os.close(dir_fd) # don't leak a file descriptor
print('Finished downloading {0} to {1}'.format(release[0], download_path))
# unpacking zip if zip
if os.path.splitext(os.path.join(download_path, release_download_filename))[1] in ['.zip', '.tar']:
print('Unpacking compressed file for {0}'.format(release[0]))
shutil.unpack_archive(os.path.join(download_path, release_download_filename), extract_dir=download_path)
print('Successfully unpacked file. Deleting compressed source...')
os.remove(os.path.join(download_path, release_download_filename))
print('Done!')
else:
print('You already have downloaded {0}'.format(release[0]))
if __name__ == '__main__':
main()
``` |
{
"source": "JordanComstock/rpi_ws281x",
"score": 3
} |
#### File: rpi_ws281x/python/bluetooth_receive.py
```python
import json
import bluetooth
import threading
import timed_LEDs
server_socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
port = 1
server_socket.bind(("", port))
server_socket.listen(1)
client_socket, address = server_socket.accept()
print("Accepted connection from ", address)
threads = []
def start_laps(delay, lap_times):
timed_LEDs.start_LEDs(delay, lap_times)
while True:
data = client_socket.recv(1024)
data = json.loads(data.decode())
if data["lap_times"]:
# pass the callable and its arguments separately so the call runs on the worker thread
t = threading.Thread(target=start_laps, args=(int(data["delay"]), data["lap_times"]))
threads.append(t)
t.start()
client_socket.close()
``` |
{
"source": "Jordan-Cottle/domonic",
"score": 2
} |
#### File: domonic/domonic/CDN.py
```python
class CDN_IMG(object):
""" CDN images """
# - icons
# - UI - emojis
'''
# SOME EXAMPLES. NOT ALL ARE HTTPS:
http://placehold.it/350x150
http://unsplash.it/200/300
http://lorempixel.com/400/200
http://dummyimage.com/600x300/000/fff
# https://dummyimage.com/420x320/ff7f7f/333333.png&text=Sample
http://placekitten.com/200/300
https://placeimg.com/640/480/any
http://placebear.com/g/200/300
https://ipsumimage.appspot.com/140x100, ff7700
https://www.fillmurray.com/640/360
https://baconmockup.com/640/360
https://placebeard.it/640x360
https://www.placecage.com/640/360
https://www.stevensegallery.com/640/360
https://fakeimg.pl/640x360
# https://fakeimg.pl/420x320/ff0000,128/333333,255/?text=Sample&font=lobster
https://picsum.photos/640/360
https://via.placeholder.com/420x320/ff7f7f/333333?text=Sample
https://keywordimg.com/420x320/random
http://www.dummysrc.com/430x320.png/22c5fc/17202A
'''
PLACEHOLDER_SERVICE = "loremflickr.com"
@staticmethod
def PLACEHOLDER(width=100, height=100, HTTP="", seperator='/'):
"""
to update do CDN_IMG.PLACEHOLDER_SERVICE = "placebear.com/g"
usage : img(_src=CDN_IMG.PLACEHOLDER(300,100))
default HTTP is none, to let the browser decide
# use optional seperator if the site uses x instead of slash
img(_src=CDN_IMG.PLACEHOLDER(300,100,'x'))
"""
return f"{HTTP}://{CDN_IMG.PLACEHOLDER_SERVICE}/{width}{seperator}{height}"
class CDN_JS(object):
"""
You will need to append the lib version number if you add any libs here
# obvious candidates... https://github.com/sorrycc/awesome-javascript
"""
JQUERY_3_5_1 = "https://code.jquery.com/jquery-3.5.1.min.js"
JQUERY_UI = "https://code.jquery.com/ui/1.12.0/jquery-ui.min.js"
UNDERSCORE = "https://cdn.jsdelivr.net/npm/[email protected]/underscore-min.js"
BOOTSTRAP_4 = "https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js"
POPPER_1_16_1 = "https://cdn.jsdelivr.net/npm/[email protected]/dist/umd/popper.min.js"
BOOTSTRAP_5_ALPHA = "https://stackpath.bootstrapcdn.com/bootstrap/5.0.0-alpha1/js/bootstrap.min.js"
D3_6_1_0 = "https://cdnjs.cloudflare.com/ajax/libs/d3/6.1.0/d3.min.js"
MODERNIZER_2_8_3 = "https://cdnjs.cloudflare.com/ajax/libs/modernizr/2.8.3/modernizr.min.js"
MOMENT_2_27_0 = "https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.27.0/moment.min.js"
PIXI_5_3_3 = "https://cdnjs.cloudflare.com/ajax/libs/pixi.js/5.3.3/pixi.min.js"
SOCKET_1_4_5 = "https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.4.5/socket.io.min.js"
X3DOM = "https://www.x3dom.org/download/x3dom.js"
AFRAME_1_2 = "https://aframe.io/releases/1.2.0/aframe.min.js"
BRYTHON_3_9_5 = "https://cdnjs.cloudflare.com/ajax/libs/brython/3.9.5/brython.min.js"
MATHML = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=MML_HTMLorMML" # polyfill mathml
# def find_on_cdn():
# https://cdn.jsdelivr.net/npm/
# https://cdnjs.cloudflare.com/ajax/libs/
# def dl(self, path=None): # download
# if path none domonic.JS_MASTER < strip off name to get default assets folder if non passed
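# Illustrative usage (not part of the original file): these constants are meant to be
# dropped into a script tag, e.g. script(_src=CDN_JS.JQUERY_3_5_1), mirroring the
# img(_src=...) pattern shown for CDN_IMG above.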
class CDN_CSS(object):
"""
Preferably use version numbers if available. user LATEST if it always gets the latest
"""
BOOTSTRAP_5_ALPHA = "https://stackpath.bootstrapcdn.com/bootstrap/5.0.0-alpha1/css/bootstrap.min.css"
BOOTSTRAP_4 = "https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css"
MARX = "https://unpkg.com/marx-css/css/marx.min.css" # version?
MVP = "https://unpkg.com/mvp.css" # version?
WATER_LATEST = "https://cdn.jsdelivr.net/gh/kognise/water.css@latest/water.min.css" # note 'latest' in cdn url
BALLOON = "https://unpkg.com/balloon-css/balloon.min.css"
THREE_DOTS_0_2_0 = "https://cdnjs.cloudflare.com/ajax/libs/three-dots/0.2.0/three-dots.min.css"
MILLIGRAM_1_3_0 = "https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.css"
X3DOM = "https://www.x3dom.org/download/x3dom.css"
FONTAWESOME_5_7_1 = "https://use.fontawesome.com/releases/v5.7.1/css/all.css"
MDI_5_4_55 = "https://cdn.materialdesignicons.com/5.4.55/css/materialdesignicons.min.css" # icons
# find_on_cdn():
# https://unpkg.com/
# https://cdnjs.cloudflare.com/ajax/libs/
# def dl(self, path=domonic.JS_MASTER): # download
class CDN_FONT(object):
@staticmethod
def google(family):
return "http://fonts.googleapis.com/css?family=" + '+'.join(family)
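# Quick sketch of the helper above (illustrative): the family list is joined with '+',
# so CDN_FONT.google(['Open', 'Sans']) returns
# 'http://fonts.googleapis.com/css?family=Open+Sans'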
```
#### File: domonic/constants/entities.py
```python
class Entity():
def __init__(self, entity: str):
self.entity = entity
def __str__(self):
import html
return html.unescape(self.entity)
class Char():
def __init__(self, character: str):
self.character = character
def __str__(self):
import html
return html.escape(self.character)
# def __repr__(self):
# return self.character
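# A minimal illustration of the two wrappers above (not in the original source):
# str() on an Entity unescapes the stored entity, str() on a Char escapes the raw character.
# assert str(Entity('&amp;')) == '&'
# assert str(Char('<')) == '&lt;'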
# web
# ASCII Characters (Printable)
SPACE = ' '
EXCLAMATION_MARK = '!' #: !
QUOTATION_MARK = '"' #: "
NUMBER_SIGN = '#' #: #
DOLLAR_SIGN = '$' #: $
PERCENT_SIGN = '%' #: %
AMPERSAND = '&' #: &
APOSTROPHE = "'" #: '
OPENING_PARENTHESIS = '(' #: (
LEFT_PARENTHESIS = '(' #: (
CLOSING_PARENTHESIS = ')' #: )
RIGHT_PARENTHESIS = ')' #: )
ASTERISK = '*' #: *
PLUS_SIGN = '+' #: +
COMMA = ',' #: ,
HYPHEN = '-' #: -
PERIOD = '.' #: .
SLASH = '/' #: /
ZERO = '0' #: 0
ONE = '1' #: 1
TWO = '2' #: 2
THREE = '3' #: 3
FOUR = '4' #: 4
FIVE = '5' #: 5
SIX = '6' #: 6
SEVEN = '7' #: 7
EIGHT = '8' #: 8
NINE = '9' #: 9
COLON = ':' #: :
SEMICOLON = ';' #: ;
LESS_THAN = '<' #: <
EQUALS_SIGN = '=' #: =
GREATER_THAN = '>' #: >
QUESTION_MARK = '?' #: ?
AT_SIGN = '@' #: @
UPPERCASE_A = 'A' #: A
UPPERCASE_B = 'B' #: B
UPPERCASE_C = 'C' #: C
UPPERCASE_D = 'D' #: D
UPPERCASE_E = 'E' #: E
UPPERCASE_F = 'F' #: F
UPPERCASE_G = 'G' #: G
UPPERCASE_H = 'H' #: H
UPPERCASE_I = 'I' #: I
UPPERCASE_J = 'J' #: J
UPPERCASE_K = 'K' #: K
UPPERCASE_L = 'L' #: L
UPPERCASE_M = 'M' #: M
UPPERCASE_N = 'N' #: N
UPPERCASE_O = 'O' #: O
UPPERCASE_P = 'P' #: P
UPPERCASE_Q = 'Q' #: Q
UPPERCASE_R = 'R' #: R
UPPERCASE_S = 'S' #: S
UPPERCASE_T = 'T' #: T
UPPERCASE_U = 'U' #: U
UPPERCASE_V = 'V' #: V
UPPERCASE_W = 'W' #: W
UPPERCASE_X = 'X' #: X
UPPERCASE_Y = 'Y' #: Y
UPPERCASE_Z = 'Z' #: Z
OPENING_SQUARE_BRACKET = '[' #: [
BACKSLASH = '\\' #: \
CLOSING_SQUARE_BRACKET = ']' #: ]
CARET = '^' #: ^
UNDERSCORE = '_' #: _
GRAVE_ACCENT = '`' #:
LOWERCASE_A = 'a' #: a
LOWERCASE_B = 'b' #: b
LOWERCASE_C = 'c' #: c
LOWERCASE_D = 'd' #: d
LOWERCASE_E = 'e' #: e
LOWERCASE_F = 'f' #: f
LOWERCASE_G = 'g' #: g
LOWERCASE_H = 'h' #: h
LOWERCASE_I = 'i' #: i
LOWERCASE_J = 'j' #: j
LOWERCASE_K = 'k' #: k
LOWERCASE_L = 'l' #: l
LOWERCASE_M = 'm' #: m
LOWERCASE_N = 'n' #: n
LOWERCASE_O = 'o' #: o
LOWERCASE_P = 'p' #: p
LOWERCASE_Q = 'q' #: q
LOWERCASE_R = 'r' #: r
LOWERCASE_S = 's' #: s
LOWERCASE_T = 't' #: t
LOWERCASE_U = 'u' #: u
LOWERCASE_V = 'v' #: v
LOWERCASE_W = 'w' #: w
LOWERCASE_X = 'x' #: x
LOWERCASE_Y = 'y' #: y
LOWERCASE_Z = 'z' #: z
OPENING_CURLY_BRACE = '{' #: {
LEFT_CURLY_BRACE = '{' #: {
VERTICAL_BAR = '|' #: |
CLOSING_CURLY_BRACE = '}' #: }
RIGHT_CURLY_BRACE = '}' #: }
TILDE = '~' #: ~
# ISO-8859-1 Characters
AGRAVE = 'À' #: À
AACUTE = 'Á' #: Á
ACIRC = 'Â' #: Â
ATILDE = 'Ã' #: Ã
AUML = 'Ä' #: Ä
ARING = 'Å' #: Å
AELIG = 'Æ' #: Æ
CCEDIL = 'Ç' #: Ç
EGRAVE = 'È' #: È
EACUTE = 'É' #: É
ECIRC = 'Ê' #: Ê
EUML = 'Ë' #: Ë
IGRAVE = 'Ì' #: Ì
IACUTE = 'Í' #: Í
ICIRC = 'Î' #: Î
IUML = 'Ï' #: Ï
ETH = 'Ð' #: Ð
NTILDE = 'Ñ' #: Ñ
OGRAVE = 'Ò' #: Ò
OACUTE = 'Ó' #: Ó
OCIRC = 'Ô' #: Ô
OTILDE = 'Õ' #: Õ
OUML = 'Ö' #: Ö
OSLASH = 'Ø' #: Ø
UGRAVE = 'Ù' #: Ù
UACUTE = 'Ú' #: Ú
UCIRC = 'Û' #: Û
UUML = 'Ü' #: Ü
YACUTE = 'Ý' #: Ý
THORN = 'Þ' #: Þ
SZLIG = 'ß' #: ß
AGRAVE = 'à' #: à
AACUTE = 'á' #: á
ACIRC = 'â' #: â
ATILDE = 'ã' #: ã
AUML = 'ä' #: ä
ARING = 'å' #: å
AELIG = 'æ' #: æ
CCEDIL = 'ç' #: ç
EGRAVE = 'è' #: è
EACUTE = 'é' #: é
ECIRC = 'ê' #: ê
EUML = 'ë' #: ë
IGRAVE = 'ì' #: ì
IACUTE = 'í' #: í
ICIRC = 'î' #: î
IUML = 'ï' #: ï
ETH = 'ð' #: ð
NTILDE = 'ñ' #: ñ
OGRAVE = 'ò' #: ò
OACUTE = 'ó' #: ó
OCIRC = 'ô' #: ô
OTILDE = 'õ' #: õ
OUML = 'ö' #: ö
OSLASH = 'ø' #: ø
UGRAVE = 'ù' #: ù
UACUTE = 'ú' #: ú
UCIRC = 'û' #: û
UUML = 'ü' #: ü
YACUTE = 'ý' #: ý
THORN = 'þ' #: þ
YUML = 'ÿ' #: ÿ
# ISO-8859-1 Symbols
NBSP = ' ' #:
IEXCL = '¡' #: ¡
CENT = '¢' #: ¢
POUND = '£' #: £
CURREN = '¤' #: ¤
YEN = '¥' #: ¥
BRVBAR = '¦' #: ¦
SECT = '§' #: §
UML = '¨' #: ¨
COPY = '©' #: ©
COPYRIGHT = '©' #: ©
ORDF = 'ª' #: ª
LAQUO = '«' #: «
NOT = '¬' #: ¬
# ­ ­ Soft hyphen
REG = '®' #: ®
MACR = '¯' #: ¯
DEG = '°' #: °
PLUSMN = '±' #: ±
SUP2 = '²' #: ²
SUP3 = '³' #: ³
ACUTE = '´' #: ´
MICRO = 'µ' #: µ
PARA = '¶' #: ¶
CEDIL = '¸' #: ¸
SUP1 = '¹' #: ¹
ORDM = 'º' #: º
RAQUO = '»' #: »
FRAC14 = '¼' #: ¼
FRAC12 = '½' #: ½
FRAC34 = '¾' #: ¾
IQUEST = '¿' #: ¿
TIMES = '×' #: ×
DIVIDE = '÷' #: ÷
# Math Symbols
FORALL = '∀' #: ∀
PART = '∂' #: ∂
EXIST = '∃' #: ∃
EMPTY = '∅' #: ∅
NABLA = '∇' #: ∇
ISIN = '∈' #: ∈
NOTIN = '∉' #: ∉
NI = '∋' #: ∋
PROD = '∏' #: ∏
SUM = '∑' #: ∑
MINUS = '−' #: −
LOWAST = '∗' #: ∗
RADIC = '√' #: √
PROP = '∝' #: ∝
INFIN = '∞' #: ∞
ANG = '∠' #: ∠
AND = '∧' #: ∧
OR = '∨' #: ∨
CAP = '∩' #: ∩
CUP = '∪' #: ∪
INT = '∫' #: ∫
THERE4 = '∴' #: ∴
SIM = '∼' #: ∼
CONG = '≅' #: ≅
ASYMP = '≈' #: ≈
NE = '≠' #: ≠
EQUIV = '≡' #: ≡
LE = '≤' #: ≤
GE = '≥' #: ≥
SUB = '⊂' #: ⊂
SUP = '⊃' #: ⊃
NSUB = '⊄' #: ⊄
SUBE = '⊆' #: ⊆
SUPE = '⊇' #: ⊇
OPLUS = '⊕' #: ⊕
OTIMES = '⊗' #: ⊗
PERP = '⊥' #: ⊥
SDOT = '⋅' #: ⋅
# Greek Letters
ALPHA = 'Α' #: Α
BETA = 'Β' #: Β
GAMMA = 'Γ' #: Γ
DELTA = 'Δ' #: Δ
EPSILON = 'Ε' #: Ε
ZETA = 'Ζ' #: Ζ
ETA = 'Η' #: Η
THETA = 'Θ' #: Θ
IOTA = 'Ι' #: Ι
KAPPA = 'Κ' #: Κ
LAMBDA = 'Λ' #: Λ
MU = 'Μ' #: Μ
NU = 'Ν' #: Ν
XI = 'Ξ' #: Ξ
OMICRON = 'Ο' #: Ο
PI = 'Π' #: Π
RHO = 'Ρ' #: Ρ
SIGMA = 'Σ' #: Σ
TAU = 'Τ' #: Τ
UPSILON = 'Υ' #: Υ
PHI = 'Φ' #: Φ
CHI = 'Χ' #: Χ
PSI = 'Ψ' #: Ψ
OMEGA = 'Ω' #: Ω
ALPHA = 'α' #: α
BETA = 'β' #: β
GAMMA = 'γ' #: γ
DELTA = 'δ' #: δ
EPSILON = 'ε' #: ε
ZETA = 'ζ' #: ζ
ETA = 'η' #: η
THETA = 'θ' #: θ
IOTA = 'ι' #: ι
KAPPA = 'κ' #: κ
LAMBDA = 'λ' #: λ
MU = 'μ' #: μ
NU = 'ν' #: ν
XI = 'ξ' #: ξ
OMICRON = 'ο' #: ο
PI = 'π' #: π
RHO = 'ρ' #: ρ
SIGMAF = 'ς' #: ς
SIGMA = 'σ' #: σ
TAU = 'τ' #: τ
UPSILON = 'υ' #: υ
PHI = 'φ' #: φ
CHI = 'χ' #: χ
PSI = 'ψ' #: ψ
OMEGA = 'ω' #: ω
THETASYM = 'ϑ' #: ϑ
UPSIH = 'ϒ' #: ϒ
PIV = 'ϖ' #: ϖ
OELIG = 'Œ' #: Œ
oeLIG = 'œ' #: œ
SCARON = 'Š' #: Š
Scaron = 'Š' #: Š
scaron = 'š' #: š
YUML = 'Ÿ' #: Ÿ
FNOF = 'ƒ' #: ƒ
CIRC = 'ˆ' #: ˆ
TILDE = '˜' #: ˜
#     En space
#     Em space
#     Thin space
# ‌ ‌ Zero width non-joiner
# ‍ ‍ Zero width joiner
# ‎ ‎ Left-to-right mark
# ‏ ‏ Right-to-left mark
NDASH = '–' #: –
MDASH = '—' #: —
LSQUO = '‘' #: ‘
RSQUO = '’' #: ’
SBQUO = '‚' #: ‚
LDQUO = '“' #: “
RDQUO = '”' #: ”
BDQUO = '„' #: „
DAGGER = '†' #: †
DAGGER = '‡' #: ‡
BULL = '•' #: •
HELLIP = '…' #: …
PERMIL = '‰' #: ‰
PRIME = '′' #: ′
PRIME = '″' #: ″
LSAQUO = '‹' #: ‹
RSAQUO = '›' #: ›
OLINE = '‾' #: ‾
EURO = '€' #: €
TRADE = '™' #: ™
TRADEMARK = '™' #: ™
# ARROWS
LARR = '←' #: ←
LEFT = '←' #: ←
UARR = '↑' #: ↑
UP = '↑' #: ↑
RARR = '→' #: →
RIGHT = '→' #: →
DARR = '↓' #: ↓
DOWN = '↓' #: ↓
HARR = '↔' #: ↔
CRARR = '↵' #: ↵
LCEIL = '⌈' #: ⌈
RCEIL = '⌉' #: ⌉
LFLOOR = '⌊' #: ⌊
RFLOOR = '⌋' #: ⌋
LOZ = '◊' #: ◊
SPADES = '♠' #: ♠
CLUBS = '♣' #: ♣
HEARTS = '♥' #: ♥
DIAMS = '♦' #: ♦
DIAMONDS = '♦' #: ♦
SUNG = '♪' #: ♪
FLAT = '♭' #: ♭
NATUR = '♮' #: ♮
NATURAL = '♮' #: ♮
SHARP = '♯' #: ♯
CHECK = "✓" #: ✓
CHECKMARK = "✓" #: ✓
TICK = "✓" #: ✓
CROSS = "✗" #: ✗
OHM = 'Ω' #: Ω
MHO = '℧' #: ℧
FRAC13 = '⅓' #: ⅓
FRAC23 = '⅔' #: ⅔
FRAC15 = '⅕' #: ⅕
FRAC25 = '⅖' #: ⅖
FRAC35 = '⅗' #: ⅗
FRAC45 = '⅘' #: ⅘
FRAC16 = '⅙' #: ⅙
FRAC56 = '⅚' #: ⅚
FRAC18 = '⅛' #: ⅛
FRAC38 = '⅜' #: ⅜
FRAC58 = '⅝' #: ⅝
FRAC78 = '⅞' #: ⅞
STAR = "☆" #: ☆
STARF = "★" #: ★
BIGSTAR = "★"
PHONE = "☎" #: ☎
FEMALE = "♀" #: ♀
MALE = "♂" #: ♂
```
#### File: domonic/constants/keyboard.py
```python
class KeyCode():
A = '65' #:
ALTERNATE = '18' #:
B = '66' #:
BACKQUOTE = '192' #:
BACKSLASH = '220' #:
BACKSPACE = '8' #:
C = '67' #:
CAPS_LOCK = '20' #:
COMMA = '188' #:
COMMAND = '15' #:
CONTROL = '17' #:
D = '68' #:
DELETE = '46' #:
DOWN = '40' #:
E = '69' #:
END = '35' #:
ENTER = '13' #:
RETURN = '13' #:
EQUAL = '187' #:
ESCAPE = '27' #:
F = '70' #:
F1 = '112' #:
F10 = '121' #:
F11 = '122' #:
F12 = '123' #:
F13 = '124' #:
F14 = '125' #:
F15 = '126' #:
F2 = '113' #:
F3 = '114' #:
F4 = '115' #:
F5 = '116' #:
F6 = '117' #:
F7 = '118' #:
F8 = '119' #:
F9 = '120' #:
G = '71' #:
H = '72' #:
HOME = '36' #:
I = '73' #:
INSERT = '45' #:
J = '74' #:
K = '75' #:
L = '76' #:
LEFT = '37' #:
LEFTBRACKET = '219' #:
M = '77' #:
MINUS = '189' #:
N = '78' #:
NUMBER_0 = '48' #:
NUMBER_1 = '49' #:
NUMBER_2 = '50' #:
NUMBER_3 = '51' #:
NUMBER_4 = '52' #:
NUMBER_5 = '53' #:
NUMBER_6 = '54' #:
NUMBER_7 = '55' #:
NUMBER_8 = '56' #:
NUMBER_9 = '57' #:
NUMPAD = '21' #:
NUMPAD_0 = '96' #:
NUMPAD_1 = '97' #:
NUMPAD_2 = '98' #:
NUMPAD_3 = '99' #:
NUMPAD_4 = '100' #:
NUMPAD_5 = '101' #:
NUMPAD_6 = '102' #:
NUMPAD_7 = '103' #:
NUMPAD_8 = '104' #:
NUMPAD_9 = '105' #:
NUMPAD_ADD = '107' #:
NUMPAD_DECIMAL = '110' #:
NUMPAD_DIVIDE = '111' #:
NUMPAD_ENTER = '108' #:
NUMPAD_MULTIPLY = '106' #:
NUMPAD_SUBTRACT = '109' #:
O = '79' #:
P = '80' #:
PAGE_DOWN = '34' #:
PAGE_UP = '33' #:
PERIOD = '190' #:
Q = '81' #:
QUOTE = '222' #:
R = '82' #:
RIGHT = '39' #:
RIGHTBRACKET = '221' #:
S = '83' #:
SEMICOLON = '186' #:
SHIFT = '16' #: ?? left or right or both?
SLASH = '191' #:
SPACE = '32' #:
T = '84' #:
TAB = '9' #:
U = '85' #:
UP = '38' #:
V = '86' #:
W = '87' #:
X = '88' #:
Y = '89' #:
Z = '90' #:
# TODO - do the modifiers
# find attribute by value
# def get_letter(self, attr):
# for key, value in self.__dict__.iteritems():
# if value == attr:
# return key
# return None
def __init__(self):
""" constructor for the keyboard class """
pass
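# Illustrative usage (assumption, not from the original file): compare an event's keyCode
# string against these constants, e.g.
# if event.keyCode == KeyCode.ENTER: handle_submit()   # 'event' is a hypothetical DOM-style event object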
```
#### File: domonic/domonic/decorators.py
```python
import warnings
import functools
from functools import wraps
def el(element='div', string=False):
"""[wraps the results of a function in an element]"""
if isinstance(element, str):
# tag = __import__('domonic.html.' + element)
# print(tag)
# - TODO - get element by name required on html class
from domonic.html import tag, tag_init
from domonic.dom import Element
element = type(element, (tag, Element), {'name': element, '__init__': tag_init})
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
if string == False:
return element(result)
else:
return str(element(result))
return wrapper
return decorator
# @el(div)
# @el(span)
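# A short sketch of @el in use (names below are illustrative):
# @el('div')
# def greeting():
#     return 'hello'
# greeting()   # wraps the return value -> div('hello'); pass string=True to get the rendered markup text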
def called(before=None, error=None):
"""[calls before() passing the response as args to the decorated function.
optional error handler. run the decorated function immediately.
WARNING: this is not for the regular decorating of a function
its syntactical sugar for a callback i.e.
@called(
lambda: º.ajax('https://www.google.com'),
lambda err: print('error:', err))
def success(data=None):
print("sweet!")
print(data)
"""
def decorator(function):
nonlocal before
nonlocal error
try:
if before is None:
return function()
r = before()
return function(r)
except Exception as e:
if error is not None:
error(e)
else:
raise e
return decorator
iife = called # pass None for an iife
# def static(endpoint, update="11101"):
# '''
# render the endpoint to a cron timestamp. when user vists that function.
# it will load the rendered version instead of executing the function.
# '''
# def dont_do_it(f):
# return None
# return dont_do_it
# https://www.python.org/dev/peps/pep-0318/
# https://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
def accepts(*types):
def check_accepts(f):
assert len(types) == f.__code__.co_argcount
def new_f(*args, **kwds):
for (a, t) in zip(args, types):
assert isinstance(a, t), \
"arg %r does not match %s" % (a, t)
return f(*args, **kwds)
new_f.__name__ = f.__name__
return new_f
return check_accepts
# @accepts(int)
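# Sketch (illustrative): @accepts asserts the type of each positional argument at call time.
# @accepts(int, str)
# def label(num, text):
#     return '%s-%s' % (text, num)
# label(1, 'item')  # ok; label('1', 'item') raises an AssertionError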
def silence(*args, **kwargs):
""" stop a function from doing anything """
def dont_do_it(f):
return None
return dont_do_it
# @silence
def check(f):
""" logs entry and exit of a function """
def new_f():
print("Entering", f.__name__)
f()
print("Exited", f.__name__)
return new_f
# @check()
def log(logger, level='info'):
""" @log(logging.getLogger('main'), level='warning') """
def log_decorator(fn):
@functools.wraps(fn)
def wrapper(*a, **kwa):
getattr(logger, level)(fn.__name__)
return fn(*a, **kwa)
return wrapper
return log_decorator
def instead(f, somethingelse):
""" what to return if it fails """
def new_f():
try:
return f()
except Exception as e:
print('failed', e)
return somethingelse
return new_f
# @instead("something else instead of what was supposed to happen")
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
# considered this a few times
# def catch(f):
# """ catch exceptions and return None """
# def new_f():
# try:
# return f()
# except Exception as e:
# print('failed', e)
# return None
# return new_f
# def lenient(*args, **kwargs):
# """ can try to remap args if passed incorrectly.
# i.e. if expecting array but gets string, puts string in arr
# should never switch order probably. just re-type
# prints warning and runs
# """
'''
def aka(names):
""" @aka(*mylist) """
def aka_decorator(fn):
@functools.wraps(fn)
def wrapper(*a, **kwa):
return fn(*a, **kwa)
return wrapper
return log_decorator
'''
```
#### File: domonic/domonic/javascript.py
```python
import sys
import urllib.parse
from dateutil.parser import parse
import datetime
from datetime import timedelta
import time
from urllib.parse import unquote, quote
import math
import random
import threading
import signal
import typing
import requests
import gc
import multiprocessing
from multiprocessing.pool import ThreadPool as Pool
import re
import json
import os
# true = True
# false = False
class Object(object):
""" Creates a Mock Javascript Object in python """
def __init__(self, obj=None, *args, **kwargs):
"""[Creates a Mock Javascript Object in python]
Args:
obj ([type]): [pass an object, dict or callable to the contructor]
"""
# print('object created!')
if obj is None:
obj = {}
self.__attribs__ = {}
if callable(obj):
self.__attribs__ = {}
self.__attribs__['__call__'] = obj
# self.__attribs__['__call__'].__name__ = '__call__'
# self.__attribs__['__call__'].__doc__ = 'The function object itself.'
# self.__attribs__['__call__'].__module__ = '__main__'
elif isinstance(obj, dict):
self.__attribs__ = obj # set the dict as the attribs
# self.__attribs__['__dict__'] = obj # set the dict as the attribs
self.__dict__ = {**self.__dict__, **obj} # set the dict as the attribs?
else:
try:
self.__attribs__ = {}
self.__attribs__.update(obj.__attribs__)
self.__attribs__.update(kwargs)
self.__attribs__.update(args)
self.__attribs__['__class__'] = obj.__class__.__name__
self.__attribs__['__module__'] = obj.__module__
self.__attribs__['__doc__'] = obj.__doc__
self.__attribs__['__proto__'] = obj
# self.__attribs__['__proto__'].__class__ = Object
# self.__attribs__['__proto__'].__attribs__ = self.__attribs__
except Exception as e:
print("Object.__init__() failed to set attribs", e)
def __str__(self):
""" Returns a string representation of the object."""
# return self.toString()
return str(self.__attribs__)
# def __repr__(self):
# """ Returns a string representation of the object."""
# return self.toString()
@staticmethod
def fromEntries(entries):
"""
transforms a list of lists containing key and value into an object.
@param entries: a list containing key and value tuples. The key and value are separated by ':'
@type entries: list of tuple(string, string)
@returns: a dict object.
>>> fromEntries(entries)
{'a': 1, 'b': 2, 'c': 3}
"""
return {k: v for k, v in entries}
@staticmethod
def assign(target, source):
""" Copies the values of all enumerable own properties from one or more source objects to a target object. """
if isinstance(target, dict):
if isinstance(source, dict):
for k, v in source.items():
target[k] = v
else:
for k, v in source.__attribs__.items():
target[k] = v
else:
if isinstance(source, dict):
for k, v in source.items():
setattr(target, k, v)
else:
for k, v in source.attribs.items():
setattr(target, k, v)
# return target
# for prop in source.__attribs__:
# if source.propertyIsEnumerable(prop):
# target.__attribs__[prop] = source.__attribs__[prop]
return target
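# Illustrative example of assign (not in the original file): enumerable key/value pairs
# are copied onto the target and the target is returned.
# target = {'a': 1}
# Object.assign(target, {'b': 2})   # -> {'a': 1, 'b': 2}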
@staticmethod
def create(proto, propertiesObject=None):
""" Creates a new object with the specified prototype object and properties. """
if propertiesObject is None:
return Object(proto)
if isinstance(propertiesObject, dict):
return Object(propertiesObject)
elif isinstance(propertiesObject, Object):
return propertiesObject
elif isinstance(propertiesObject, list):
return Object.fromEntries(propertiesObject)
else:
return propertiesObject
# return Object(propertiesObject)
# obj = {}
# for key in proto.keys():
# obj[key] = propertiesObject[key]
# return obj
@staticmethod
def defineProperty(obj, prop, descriptor):
""" Adds the named property described by a given descriptor to an object. """
obj[prop] = descriptor
# @staticmethod
# def defineProperties(obj, props):
# """ Adds the named properties described by the given descriptors to an object. """
# for prop, desc in props.items():
# obj.__define_property__(prop, desc) # TODO - obviously that wont work
@staticmethod
def entries(obj):
""" Returns an array containing all of the [key, value] pairs in the object. """
if isinstance(obj, dict):
return [[k, v] for k, v in obj.items()]
if isinstance(obj, (float, int)):
return []
@staticmethod
def keys(obj):
""" Returns an array containing the names of all of the given object's own enumerable string properties."""
if isinstance(obj, dict):
return obj.keys()
if isinstance(obj, (float, int)):
return []
return obj.__attribs__.keys() # TODO - this is probably wrong
@staticmethod
def values(obj):
""" Returns an array containing the values that correspond to
all of a given object's own enumerable string properties. """
if isinstance(obj, dict):
return obj.values()
if isinstance(obj, (float, int)):
return []
return obj.__attribs__.values() # TODO - this is probably wrong
@staticmethod
def getOwnPropertyDescriptor(obj, prop):
""" Returns a property descriptor for a named property on an object. """
if isinstance(obj, dict):
return obj[prop]
return obj.__attribs__[prop]
@staticmethod
def getOwnPropertyNames(obj):
""" Returns an array containing the names of all of the given object's
own enumerable and non-enumerable properties. """
if isinstance(obj, dict):
return obj.keys()
elif isinstance(obj, Object):
return obj.__attribs__.keys()
elif isinstance(obj, object):
return [prop for prop in dir(obj) if not prop.startswith('__')]
return obj.__attribs__.keys()
# @staticmethod
# def _is(value1, value2):
# """ Compares if two values are the same value.
# Equates all NaN values (which differs from both Abstract Equality Comparison and Strict Equality Comparison)."""
# pass
@staticmethod
def getOwnPropertySymbols(obj):
""" Returns an array of all symbol properties found directly upon a given object. """
if isinstance(obj, dict):
return []
return [prop for prop in dir(obj) if not prop.startswith('__')]
@staticmethod
def getPrototypeOf(obj):
""" Returns the prototype (internal [[Prototype]] property) of the specified object. """
if isinstance(obj, dict):
return obj
elif isinstance(obj, Object):
return obj.prototype
elif isinstance(obj, object):
return obj.__class__
return obj.__proto__
# @staticmethod
# def isExtensible(obj):
# """ Determines if extending of an object is allowed. """
# if isinstance(obj, dict):
# return True
# elif isinstance(obj, Object):
# return obj.extensible
# elif isinstance(obj, object):
# return True
# return False
# @staticmethod
# def isFrozen(obj):
# """ Determines if an object was frozen. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# return obj.frozen
# elif isinstance(obj, object):
# return False
# return False
# @staticmethod
# def isSealed(obj):
# """ Determines if an object is sealed. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# return obj.sealed
# elif isinstance(obj, object):
# return False
# return False
# @staticmethod
# def preventExtensions(obj):
# """ Prevents any extensions of an object. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.extensible = False
# return True
# elif isinstance(obj, object):
# return False
# return False
# @staticmethod
# def seal(obj):
# """ Prevents other code from deleting properties of an object. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.sealed = True
# return True
# elif isinstance(obj, object):
# return False
# return False
# @staticmethod
# def setPrototypeOf(obj, prototype):
# """ Sets the object's prototype (its internal [[Prototype]] property). """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.prototype = prototype
# return True
# elif isinstance(obj, object):
# return False
# return False
# @staticmethod
# def freeze(obj):
# """ Freezes an object. Other code cannot delete or change its properties. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.frozen = True
# return True
# elif isinstance(obj, object):
# return False
# return False
def prototype(self, obj):
"""
prototype and allows you to add properties and methods to this object
"""
if isinstance(obj, dict):
return False
elif isinstance(obj, Object):
obj.prototype = self
return True
elif isinstance(obj, object):
return False
return False
# def __getattr__(self, name):
# """
# The __getattr__() method is called when the attribute 'name' is accessed.
# """
# if name == 'objectId':
# return self.getId()
# if name == 'proto':
# return self.__class__.fromEntries(self.__attribs__)
# return getattr(self, name)
def __defineGetter__(self, prop, func):
""" Adds a getter function for the specified property. """
self.__attribs__[prop] = property(func)
return self
def __defineSetter__(self, prop, func):
""" Associates a function with a property that, when set, calls the function. """
self.__attribs__[prop] = property(func)
return self
def __lookupGetter__(self, prop):
"""
Returns the getter function for the specified property.
"""
return self.__attribs__[prop]
def __lookupSetter__(self, prop):
""" Returns the function associated with the specified property by the __defineSetter__() method. """
return self.__attribs__[prop]
def hasOwnProperty(self, prop):
""" Returns a boolean indicating whether an object contains the specified property
as a direct property of that object and not inherited through the prototype chain. """
# raise NotImplementedError
# return hasattr(self, prop)
return self.__attribs__.get(prop, None) != None
def isPrototypeOf(self, obj):
""" Returns a boolean indicating whether an object is a copy of this object. """
if isinstance(obj, Object):
return obj.prototype == self
elif isinstance(obj, dict):
return obj == self
elif isinstance(obj, object):
return obj.__class__ == self.__class__ and obj.__dict__ == self.__dict__
return obj.__class__ == self.__class__ and obj.__proto__ == self
# def propertyIsEnumerable(self, prop):
# """ Returns a boolean indicating whether the specified property is enumerable. """
# pass
def toLocaleString(self):
""" Calls toString()"""
return self.toString()
def toString(self):
""" Returns a string representation of the object."""
return '[' + self.__class__.__name__ + ': ' + str(self.__attribs__) + ']'
def valueOf(self):
""" Returns the value of the object. """
return self
# def __str__(self):
# """ Returns a string representation of the object. """
# return self.toString()
# def __repr__(self):
# """ Returns a string representation of the object. """
# return self.toString()
def __iter__(self):
""" Iterates over object's properties. """
for prop in self.__attribs__:
yield prop
for key in self.__dict__:
yield key
return
def __hash__(self):
""" Returns the hash of the object. """
return hash(self.toString())
def __eq__(self, other):
""" Compares two objects. """
if isinstance(other, Object):
return self.toString() == other.toString()
return False
def __ne__(self, other):
""" Compares two objects. """
if isinstance(other, Object):
return self.toString() != other.toString()
return True
def __nonzero__(self):
""" Returns whether the object is false. """
return self.toString() != ''
def __bool__(self):
""" Returns whether the object is false. """
return self.toString() != ''
# def __dict__(self):
# """ Returns the object's attributes as a dictionary. """
# return self.__attribs__
def __getitem__(self, key):
""" Returns the value of the specified property. """
return self.__attribs__[key]
def __setitem__(self, key, value):
""" Sets the value of the specified property. """
self.__attribs__[key] = value
def __delitem__(self, key):
""" Deletes the specified property. """
del self.__attribs__[key]
def __len__(self):
""" Returns the number of properties. """
return len(self.__attribs__)
def __contains__(self, key):
""" Returns whether the specified property exists. """
return key in self.__attribs__
# def __call__(self, *args, **kwargs):
# """ Calls the object. """
# return self.toString()
class Function(object):
""" a Function object """
def __init__(self, func):
self.func = func
self.arguments = []
self.caller = None
self.displayName = None
self.length = None
self.name = None
# self.isCallable = True
# self.constructor = False
# self.__proto__ = None
# self.prototype = None
# self.extensible = True
# self.frozen = False
# self.sealed = False
# self.__class__ = Function
# self.__dict__ = {}
# self.__attribs__ = {}
# self.__methods__ = {}
def apply(self, thisArg=None, *args, **kwargs):
"""[calls a function with a given this value, and arguments provided as an array]
Args:
thisArg ([type]): [The value of this provided for the call to func.]
Returns:
[type]: [result of calling the function.]
"""
if thisArg is not None:
return self.func(args)
return self.func()
def bind(self, thisArg, *args, **kwargs):
"""[creates a new function that, when called,
has its this keyword set to the provided value,
with a given sequence of arguments preceding any provided when the new function is called.]
Args:
thisArg ([type]): [The value to be passed as the this parameter to the target
function func when the bound function is called.]
Returns:
[type]: [A copy of the given function with the specified this value, and initial arguments (if provided).]
"""
from functools import partial
bound_f = partial(self.func, *args, *kwargs)
return bound_f
# raise NotImplementedError
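# Quick illustrative sketch of bind (names are made up): the returned partial has the
# leading arguments pre-filled, much like Function.prototype.bind in JS.
# add = Function(lambda a, b: a + b)
# add_one = add.bind(None, 1)
# add_one(2)   # -> 3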
# @staticmethod
def call(self, thisArg=None, *args, **kwargs):
"""[calls a function with a given this value and arguments provided individually.]
Args:
thisArg ([type]): [description]
Returns:
[type]: [result of calling the function.]
"""
# raise NotImplementedError
# print('CALL!!')
# print(thisArg)
# print(args)
if thisArg is not None:
return self.func(thisArg, args)
# return self.func(this=thisArg, *args)
return self.func(args)
def toString(self):
"""[Returns a string representing the source code of the function. Overrides the]
"""
raise NotImplementedError
class Map(object):
""" Map holds key-value pairs and remembers the original insertion order of the keys.
"""
def __init__(self, collection):
"""[Pass a list or collection to make a Map object]
Args:
collection ([type]): [a list or dict]
"""
# parses the passed collection
if isinstance(collection, list):
# create a dict from the list
self._dict = dict(zip(collection, collection))
elif isinstance(collection, dict):
# use the passed dict
self._dict = collection
else:
raise TypeError("Map requires a list or dict.")
self._order = list(self._dict.keys())
def __contains__(self, key):
return key in self._dict
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
if key not in self._dict:
self._order.append(key)
self._dict[key] = value
def __delitem__(self, key):
self._order.remove(key)
del self._dict[key]
def clear(self):
""" Removes all key-value pairs from the Map object. """
self._dict = {}
self._order = []
def delete(self, key):
""" Returns true if an element in the Map object existed and has been removed,
or false if the element does not exist. Map.prototype.has(key) will return false afterwards. """
try:
self._order.remove(key)
del self._dict[key]
return True
except Exception:
return False
def get(self, key, default=None):
""" Returns the value associated to the key, or undefined if there is none. """
return self._dict.get(key, default)
def has(self, key):
""" Returns a boolean asserting whether a value has been associated to the key in the Map object or not."""
return key in self._dict
def set(self, key, value):
""" Sets the value for the key in the Map object. Returns the Map object. """
if key not in self._dict:
self._order.append(key)
self._dict[key] = value
return self
def iterkeys(self):
return iter(self._order)
def iteritems(self):
for key in self._order:
yield key, self._dict[key]
def keys(self):
""" Returns a new Iterator object that contains the keys
for each element in the Map object in insertion order. """
return list(self.iterkeys())
def values(self):
""" Returns a new Iterator object that contains the values
for each element in the Map object in insertion order. """
return [self._dict[key] for key in self._order]
def entries(self):
""" Returns a new Iterator object that contains an array of [key, value]
for each element in the Map object in insertion order. """
return [(x, self._dict[x]) for x in self._order]
# def forEach(self, callbackFn[, thisArg]):
# raise NotImplementedError
def update(self, ordered_dict):
for key, value in ordered_dict.iteritems():
self[key] = value
def __str__(self):
return str([(x, self._dict[x]) for x in self._order])
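# Illustrative usage of the Map above (assumes the dict constructor shown):
# m = Map({})
# m.set('a', 1).set('b', 2)
# m.get('a')    # -> 1
# m.keys()      # -> ['a', 'b'] in insertion order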
class FormData(object):
"""[utils for a form]
Args:
object ([str]): [takes a string or pyml object and returns a FormData]
"""
def __init__(self, form):
""" creates a new FormData object. """
# TODO - parse to domonic.
# if isinstance(form, str):
# self._data = domonic.loads(form) # TODO - parser wont be done enough yet
# if isinstance(form, Node):
# self._data = form
raise NotImplementedError
def append(self, name, value, filename):
""" Appends a new value onto an existing key inside a FormData object,
or adds the key if it does not already exist. """
raise NotImplementedError
def delete(self, name):
""" Deletes a key/value pair from a FormData object. """
raise NotImplementedError
def entries(self):
""" Returns an iterator allowing to go through all key/value pairs contained in this object. """
raise NotImplementedError
def get(self, name):
""" Returns the first value associated with a given key from within a FormData object. """
raise NotImplementedError
def getAll(self, name):
""" Returns an array of all the values associated with a given key from within a FormData """
raise NotImplementedError
def has(self, name):
""" Returns a boolean stating whether a FormData object contains a certain key."""
raise NotImplementedError
def keys(self):
""" Returns an iterator allowing to go through all keys of the key/value pairs contained in this object."""
raise NotImplementedError
def set(self, name, value, filename):
""" Sets a new value for an existing key inside a FormData object,
or adds the key/value if it does not already exist."""
raise NotImplementedError
def values(self):
""" Returns an iterator allowing to go through all values contained in this object."""
raise NotImplementedError
class Worker(object):
"""[A background task that can be created via script, which can send messages back to its creator.
Creating a worker is done by calling the Worker("path/to/worker/script") constructor.]
TODO - JSWorker - Node
Args:
object ([str]): [takes a path to a python script]
"""
def __init__(self, script):
""" creates a new Worker object. """
raise NotImplementedError
def postMessage(self):
""" Sends a message — consisting of any object — to the worker's inner scope. """
raise NotImplementedError
def terminate(self):
""" Immediately terminates the worker. This does not let worker finish its operations; it is halted at once.
ServiceWorker instances do not support this method. """
raise NotImplementedError
class Math(Object):
""" Math class that mirrors javascript implementation.
i.e. you can pass strings and it will also work
Math.abs('-1')
"""
# CONSTANTS
PI = 3.141592653589793
E = 2.718281828459045
LN2 = 0.6931471805599453
LN10 = 2.302585092994046
LOG2E = 1.4426950408889634
LOG10E = 0.4342944819032518
SQRT1_2 = 0.7071067811865476
SQRT2 = 1.4142135623730951
def _force_number(func):
"""[private decorator to make Math behave like javascript and turn strings, bools and None into numbers]]
"""
def validation_decorator(*args, **kwargs):
params = list(args)
for i, n in enumerate(params):
if type(n) == list or type(n) == tuple:
if len(n) == 0:
params[i] = n = 0
elif len(n) == 1:
params[i] = n = n[0]
if type(n) == str:
if n == "":
params[i] = n = 0
continue
if n is None:
params[i] = 0
continue
if type(n) != float and type(n) != int:
try:
if '.' in n:
params[i] = float(n)
else:
params[i] = int(n)
except Exception:
# raise ValueError("")
# js returns None instead
pass
args = tuple(params)
try:
return func(*args)
except Exception:
return None
return validation_decorator
@staticmethod
@_force_number
def abs(x):
""" Returns the absolute value of x """
return abs(x)
@staticmethod
@_force_number
def acos(x):
""" Returns the arccosine of x, in radians """
return math.acos(x)
@staticmethod
@_force_number
def acosh(x):
""" Returns the hyperbolic arccosine of x """
return math.acosh(x)
@staticmethod
@_force_number
def asin(x):
""" Returns the arcsine of x, in radians """
return math.asin(x)
@staticmethod
@_force_number
def asinh(x):
""" Returns the hyperbolic arcsine of x """
return math.asinh(x)
@staticmethod
@_force_number
def atan(x):
""" Returns the arctangent of x as a numeric value between -PI/2 and PI/2 radians """
return math.atan(x)
@staticmethod
@_force_number
def atan2(x, y):
""" Returns the arctangent of the quotient of its arguments """
return math.atan2(x, y)
@staticmethod
@_force_number
def atanh(x):
""" Returns the hyperbolic arctangent of x """
return math.atanh(x)
@staticmethod
@_force_number
def cbrt(x):
""" Returns the cubic root of x """
# return math.cbrt(x)
return round(math.pow(x, 1 / 3))
@staticmethod
@_force_number
def ceil(x):
""" Returns x, rounded upwards to the nearest integer """
return math.ceil(x)
@staticmethod
@_force_number
def cos(x):
""" Returns the cosine of x (x is in radians) """
return math.cos(x)
@staticmethod
@_force_number
def cosh(x):
""" Returns the hyperbolic cosine of x """
return math.cosh(x)
@staticmethod
@_force_number
def exp(x):
""" Returns the value of Ex """
return math.exp(x)
@staticmethod
@_force_number
def floor(x):
""" Returns x, rounded downwards to the nearest integer """
return math.floor(x)
@staticmethod
@_force_number
    def log(x, base=None):
        """ Returns the natural logarithm (base E) of x, or the logarithm of x in the given base """
        if base is None:
            return math.log(x)
        return math.log(x, base)
@staticmethod
@_force_number
def max(x, y):
""" Returns the number with the highest value """
return max(x, y)
@staticmethod
@_force_number
def min(x, y):
""" Returns the number with the lowest value """
return min(x, y)
@staticmethod
@_force_number
def random():
""" Returns a random number between 0 and 1 """
# return math.random(x)
return random.random()
@staticmethod
@_force_number
def round(x):
""" Rounds x to the nearest integer """
return round(x)
@staticmethod
@_force_number
def pow(x, y):
""" Returns the value of x to the power of y """
return math.pow(x, y)
@staticmethod
@_force_number
def sin(x):
""" Returns the sine of x (x is in radians) """
return math.sin(x)
@staticmethod
@_force_number
def sinh(x):
""" Returns the hyperbolic sine of x """
return math.sinh(x)
@staticmethod
@_force_number
def sqrt(x):
""" Returns the square root of x """
return math.sqrt(x)
@staticmethod
@_force_number
def tan(x):
""" Returns the tangent of an angle """
return math.tan(x)
@staticmethod
@_force_number
def tanh(x):
""" Returns the hyperbolic tangent of a number """
return math.tanh(x)
@staticmethod
@_force_number
def trunc(x):
""" Returns the integer part of a number (x) """
return math.trunc(x)
# TODO - test
@staticmethod
# @_force_number
def hypot(*args):
""" returns the square root of the sum of squares of its arguments """
return math.hypot(*args)
# TODO - test
@staticmethod
# @_force_number
def log2(*args):
""" returns the square root of the sum of squares of its arguments """
return math.log2(*args)
# TODO - test
@staticmethod
# @_force_number
    def log1p(*args):
        """ returns the natural logarithm (base e) of 1 + a number """
        return math.log1p(*args)
# TODO - test
@staticmethod
@_force_number
def log10(x):
""" function returns the base 10 logarithm of a number, that is """
return math.log10(x)
# TODO - test
@staticmethod
@_force_number
def fround(x):
""" returns the nearest 32-bit single precision float representation of a Number """
# return math.log10(x)
raise NotImplementedError
# TODO - test
@staticmethod
@_force_number
def clz32(x):
""" returns the number of leading zero bits in the 32-bit binary representation of a number. """
raise NotImplementedError
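# Illustrative usage sketch (added example, not part of the original source).
# Wrapped in a private helper so nothing runs at import time; it only assumes the
# Math class defined above and shows the js-style string coercion.
def _example_math_usage():
    assert Math.abs('-1') == 1
    assert Math.ceil('2.3') == 3
    assert Math.min('5', 3) == 3
    assert Math.pow(2, '3') == 8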
# import urllib
class Global(object):
""" javascript global methods """
NaN = "NaN"
Infinity = float("inf")
# TODO - https://stackoverflow.com/questions/747641/what-is-the-difference-between-decodeuricomponent-and-decodeuri
@staticmethod
def decodeURI(x):
""" Decodes a URI """
return unquote(x)
@staticmethod
def decodeURIComponent(x):
""" Decodes a URI component """
return unquote(x, encoding="utf-8")
@staticmethod
def encodeURI(x):
""" Encodes a URI """
return quote(str(x), safe='~@#$&()*!+=:;,.?/\'')
@staticmethod
def encodeURIComponent(x):
""" Encodes a URI component """
return quote(str(x), safe='~()*!.\'')
# @staticmethod
# def escape():
""" Deprecated in version 1.5. Use encodeURI() or encodeURIComponent() """
# pass
@staticmethod
def eval(pythonstring):
""" Evaluates a string and executes it as if it was script code """
        return eval(pythonstring)
@staticmethod
def isFinite(x): # TODO - test
""" Returns true if x is a finite number """
return math.isfinite(x)
@staticmethod
def isNaN(x):
""" Determines whether a value is an illegal number """
try:
return math.isnan(x)
except TypeError:
return True
def NaN(self):
""" "Not-a-Number" value """
# return self.NaN
return "NaN"
@staticmethod
def Number(x):
""" Converts an object's value to a number """
try:
if type(x) == float or type(x) == int: # or type(x) == long:
return x
if type(x) == str:
if '.' in x:
return float(x)
else:
return int(x)
except Exception:
return "NaN"
return "NaN"
@staticmethod
def parseFloat(x: str):
""" Parses a string and returns a floating point number """
return float(x)
@staticmethod
def parseInt(x: str):
""" Parses a string and returns an integer """
return int(x)
@staticmethod
def String(x):
""" Converts an object's value to a string """
return str(x)
def undefined(self):
""" Indicates that a variable has not been assigned a value """
return None
# @staticmethod
# def unescape():
""" Deprecated in version 1.5. Use decodeURI() or decodeURIComponent() instead """
# pass
@staticmethod
def require(path: str):
""" Loads a script from a file """
# '.'.join(path.split('/'))
# module = __import__(path) # app.components.{component}
# my_class = getattr(module, component.title())
# return my_class()
raise NotImplementedError
@staticmethod
def setTimeout(callback, t, *args, **kwargs):
""" use threads to create a timeout method """
raise NotImplementedError
# TODO - clearTimeout.
@staticmethod
def clearTimeout(job):
# print(job)
job.cancel()
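# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Global
# class defined above.
def _example_global_usage():
    assert Global.encodeURIComponent('a b&c') == 'a%20b%26c'
    assert Global.decodeURIComponent('a%20b%26c') == 'a b&c'
    assert Global.parseInt('42') == 42
    assert Global.isNaN('not a number') is True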
class Performance():
_start = time.time()
def __init__(self):
pass
def now(self):
end = time.time()
return end - Performance._start
# def reset(self):
# Performance._start = time.time()
performance = Performance()
class Date(Object):
""" javascript date """
def __init__(self, date: str = None, formatter='python'):
if date is None:
self.date = datetime.datetime.now()
else:
self.date = self.parse_date(date)
def parse_date(self, date_string):
self.date = parse(date_string)
return self.date
def getDate(self):
""" Returns the day of the month (from 1-31) """
return self.date.day
    def getDay(self):
        """ Returns the day of the week (from 0-6) """
        day = self.date.isoweekday()
        return day if (day < 7) else 0
    def getFullYear(self):
        """ Returns the year """
        return self.date.year
    def getHours(self):
        """ Returns the hour (from 0-23) """
        return self.date.hour
    def getMilliseconds(self):
        """ Returns the milliseconds (from 0-999) """
        return round(self.date.microsecond / 1000)
    def getMinutes(self):
        """ Returns the minutes (from 0-59) """
        return self.date.minute
    def getMonth(self):
        """ Returns the month (from 0-11) """
        return self.date.month - 1
    def getSeconds(self):
        """ Returns the seconds (from 0-59) """
        return self.date.second
def getTime(self):
""" Returns the number of milliseconds since midnight Jan 1 1970, and a specified date """
return int(str(time.time()).split('.')[0])
# TODO - whats difference between this and 'now()' ?
def getTimezoneOffset(self):
""" Returns the time difference between UTC time and local time, in minutes """
return self.date.now().utcoffset().total_seconds() / 60 # TODO - TEST
def getUTCDate(self):
""" Returns the day of the month, according to universal time (from 1-31) """
        return self.date.utcnow().day
def getUTCDay(self):
""" Returns the day of the week, according to universal time (from 0-6) """
        day = self.date.utcnow().isoweekday()
        return day if (day < 7) else 0
def getUTCFullYear(self):
""" Returns the year, according to universal time """
return self.date.utcnow().year
def getUTCHours(self):
""" Returns the hour, according to universal time (from 0-23) """
return self.date.utcnow().hour
def getUTCMilliseconds(self):
""" Returns the milliseconds, according to universal time (from 0-999) """
return round(self.date.utcnow().microsecond / 1000)
def getUTCMinutes(self):
""" Returns the minutes, according to universal time (from 0-59) """
return self.date.utcnow().minute
def getUTCMonth(self):
""" Returns the month, according to universal time (from 0-11) """
return self.date.utcnow().month - 1
def getUTCSeconds(self):
""" Returns the seconds, according to universal time (from 0-59) """
return self.date.utcnow().second
def getYear(self):
""" Deprecated. Use the getFullYear() method instead """
        return self.date.year
@staticmethod
def now():
""" Returns the number of milliseconds since midnight Jan 1, 1970 """
return round(time.time() * 1000)
# @staticmethod
def parse(self, date_string):
""" Parses a date string and returns the number of milliseconds since January 1, 1970 """
        self.date = self.parse_date(str(date_string))
        # return the epoch in milliseconds, as the docstring promises
        return int(self.date.timestamp() * 1000)
    def setDate(self, day):
        """ Sets the day of the month of a date object """
        self.date = self.date.replace(day=int(day))
        # return self.date.getTime()
    def setFullYear(self, year):
        """ Sets the year of a date object """
        self.date = self.date.replace(year=int(year))
        # return self.date.getTime()
    def setHours(self, hours):
        """ Sets the hour of a date object """
        self.date = self.date.replace(hour=int(hours))
        # return self.date.getTime()
    def setMilliseconds(self, milliseconds):
        """ Sets the milliseconds of a date object """
        # datetime stores microseconds, so convert from milliseconds
        self.date = self.date.replace(microsecond=int(milliseconds) * 1000)
        # return self.now()
    # TODO - , seconds = None, milliseconds = None):
    def setMinutes(self, minutes):
        """ Set the minutes of a date object """
        self.date = self.date.replace(minute=int(minutes))
        # return self.now()
    def setMonth(self, month):
        """ Sets the month of a date object """
        self.date = self.date.replace(month=int(month))
        # return self.now()
    def setSeconds(self, seconds):
        """ Sets the seconds of a date object """
        self.date = self.date.replace(second=int(seconds))
        # return self.now()
# Sets a date to a specified number of milliseconds after/before January 1, 1970
def setTime(self, milliseconds=None):
""" Sets the number of milliseconds since January 1, 1970 """
# test copilot
# self.date.replace(millisecond=int(milliseconds))
# return self.now() # TODO - is this right? - is this same as now()?
# print('TODO: setTime')
# raise NotImplementedErro
pass
def setUTCDate(self, day):
""" Sets the day of the month of a date object, according to universal time """
self.setDate(day)
# return self.getTime()
def setUTCFullYear(self, year):
""" Sets the year of a date object, according to universal time """
self.setFullYear(year)
# return self.getTime()
def setUTCHours(self, hour):
""" Sets the hour of a date object, according to universal time """
self.setHours(hour)
# return self.getTime()
def setUTCMilliseconds(self, milliseconds):
""" Sets the milliseconds of a date object, according to universal time """
self.setMilliseconds(milliseconds)
# return self.getTime()
def setUTCMinutes(self, minutes):
""" Set the minutes of a date object, according to universal time """
self.setMinutes(minutes)
# return self.getTime()
def setUTCMonth(self, month):
""" Sets the month of a date object, according to universal time """
self.setMonth(month)
# return self.getTime()
def setUTCSeconds(self, seconds):
""" Set the seconds of a date object, according to universal time """
self.setSeconds(seconds)
# return self.getTime()
def setYear(self, year):
""" Deprecated. Use the setFullYear() method instead """
        self.date = self.date.replace(year=int(year))
# return self.getTime()
# TODO - there may not be a date object already?
def toDateString(self):
""" Converts the date portion of a Date object into a readable string """
return self.date.strftime('%Y-%m-%d')
def toUTCString(self):
""" Converts a Date object to a string, according to universal time """
return self.date.strftime('%Y-%m-%d %H:%M:%S')
def toGMTString(self):
""" Deprecated. Use the toUTCString() method instead """
return self.toUTCString()
def toJSON(self):
""" Returns the date as a string, formatted as a JSON date """
import json
return json.dumps(self.date.strftime('%Y-%m-%d'))
def toISOString(self):
""" Returns the date as a string, using the ISO standard """
return self.date.strftime('%Y-%m-%d')
def toLocaleDateString(self):
""" Returns the date portion of a Date object as a string, using locale conventions """
return self.date.strftime('%x')
def toLocaleString(self):
""" Converts a Date object to a string, using locale conventions """
return self.date.strftime('%x')
def toLocaleTimeString(self):
""" Returns the time portion of a Date object as a string, using locale conventions """
return self.date.strftime('%X')
def toTimeString(self):
""" Converts the time portion of a Date object to a string """
return self.date.strftime('%X')
def UTC(self):
""" Returns the number of milliseconds in a date since midnight of January 1, 1970, according to UTC time """
return self.date.utcnow()
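# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Date
# class defined above.
def _example_date_usage():
    d = Date('2021-06-15')
    assert d.getDate() == 15            # day of the month
    assert d.getDay() == 2              # 2021-06-15 was a Tuesday (0=Sunday .. 6=Saturday)
    assert d.getFullYear() == 2021
    assert d.toDateString() == '2021-06-15'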
class Screen(object):
""" screen """
# wrap a lib?
# https://github.com/rr-/screeninfo?
def __init__(self):
# from sys import platform
# if platform == "linux" or platform == "linux2":
# # linux
# import subprocess
# resuls = subprocess.Popen(['xrandr'],stdout=subprocess.PIPE).communicate()[0].split("current")[1].split(",")[0]
# width = resuls.split("x")[0].strip()
# heigth = resuls.split("x")[1].strip()
# print width + "x" + heigth
# elif platform == "darwin":
# # OS X
# results = str(subprocess.Popen(['system_profiler SPDisplaysDataType'],stdout=subprocess.PIPE, shell=True).communicate()[0])
# res = re.search('Resolution: \d* x \d*', results).group(0).split(' ')
# width, height = res[1], res[3]
# return width, height
# elif platform == "win32":
# from win32api import GetSystemMetrics
# print("Width =", GetSystemMetrics(0))
# print("Height =", GetSystemMetrics(1))
pass
def availHeight(self):
''' Returns the height of the screen (excluding the Windows Taskbar) '''
# return self.height
raise NotImplementedError
def availWidth(self):
''' Returns the width of the screen (excluding the Windows Taskbar) '''
raise NotImplementedError
def colorDepth(self):
''' Returns the colorDepth '''
raise NotImplementedError
def height(self):
''' Returns the total height of the screen '''
raise NotImplementedError
def pixelDepth(self):
''' Returns the pixelDepth '''
raise NotImplementedError
def width(self):
''' Returns the total width of the screen '''
raise NotImplementedError
class ProgramKilled(Exception):
pass
class Job(threading.Thread):
def __init__(self, interval, execute, *args, **kwargs):
threading.Thread.__init__(self)
self.daemon = False
self.stopped = threading.Event()
self.interval = interval
self.execute = execute
self.args = args
self.kwargs = kwargs
def stop(self):
self.stopped.set()
self.join()
def run(self):
while not self.stopped.wait(self.interval.total_seconds()):
self.execute(*self.args, **self.kwargs)
# def __str__(self):
# return "Job every %s" % self.interval
class SetInterval(object):
def signal_handler(self, signum, frame):
raise ProgramKilled
def __init__(self, function, time, *args, **kwargs):
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGINT, self.signal_handler)
self.job = Job(timedelta(microseconds=time * 1000), function, *args, **kwargs)
self.job.start()
# def stop(self):
# self.job.stop()
class Promise(object):
# undocumented - warning. use at own risk
def __init__(self, func=None, *args, **kwargs):
# print('init')
self.data = None
self.state = 'pending' # fullfilled, rejected
if func is not None:
func(self.resolve, self.reject)
def then(self, func):
if func is not None:
# print('--->',self.data)
self.data = func(self.data)
# print('-->',self.data)
return self
def catch(self, error):
# func(error)
print(error)
return self
def resolve(self, data):
# print( 'resolve called::', data )
self.data = data
self.state = "fulfilled"
return self
def reject(self, data):
self.data = data
self.state = "rejected"
return self
# def __str__(self):
# try:
# return self.data.text
# except Exception as e:
# print(e)
# return str(self)
class FetchedSet(object): # not a promise
def __init__(self, *args, **kwargs):
self.results = []
def __getitem__(self, index):
return self.results[index]
def oncomplete(self, func): # runs once all results are back
func(self.results)
return
# def __call__(self, func):
# self.results.append(func)
class Storage():
def __init__(self, filepath=None):
"""[localstorage. destroys on each session unless you pass the optional filepath]
Args:
filepath ([type], optional): [filepath]. give us a file to write to
"""
self.storage = {}
self.has_file = False
if filepath:
self.filepath = filepath
self.has_file = True
# check if file exists. if so load it in . if not create it
if filepath:
if os.path.exists(filepath):
with open(filepath, 'r') as f:
self.storage = json.load(f)
else:
with open(filepath, 'w') as f:
json.dump(self.storage, f)
def __getitem__(self, key):
return self.storage[key]
def __setitem__(self, key, value):
self.storage[key] = value
if self.has_file:
self._save()
def __len__(self):
return len(self.storage.keys())
@property
def length(self):
""" Returns an integer representing the number of data items stored in the Storage object. """
return len(self.storage.keys())
def _save(self):
if self.has_file:
with open(self.filepath, 'w') as f:
json.dump(self.storage, f)
return True
return False
def setItem(self, keyName, keyValue):
""" Adds that key to the storage, or update that key's value if it already exists """
self.storage[keyName] = keyValue
        self._save()
def key(self, keyName):
""" Returns the value of the key if it exists, otherwise returns None """
return self.storage.get(keyName, None)
def removeItem(self, keyName):
""" Removes the key and its value from the storage """
if keyName in self.storage:
del self.storage[keyName]
            self._save()
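# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Storage
# class defined above (in-memory when no filepath is given).
def _example_storage_usage():
    store = Storage()                   # pass a filepath to persist to disk
    store.setItem('token', 'abc')
    assert store.key('token') == 'abc'
    assert store.length == 1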
class Window(object):
""" window """
localStorage = Storage()
def __init__(self, *args, **kwargs):
# self.console = dom.console
# self.document = dom.document
# self.location = ''#dom.location
self.location = None
# globals()?
# dir()?
# locals()?
@property
def location(self):
# print("@@@@@@@@@@@@@@@@@@@@@@")
return self.__location
@location.setter
def location(self, x):
# print("====================>>>>>>>", x)
# dom.location = x
# dom.location = dom.location(x)#Location(x)
# from .dom import Location
# print('test::::-------------------------------------------------------',Location)
# print("xxxxxxxx>>>>>>", dom.location)
# self.__location = dom.location
# import requests.
        # store the value (the commented-out code above wired this to dom.location)
        self.__location = x
@staticmethod
def alert(msg):
""" Displays an alert box with a message and an OK button """
print(msg)
return
@staticmethod
def prompt(msg, default_text=""):
""" Displays a dialog box that prompts the visitor for input """
print(msg)
data = input()
return data
@staticmethod
def setTimeout(function, t, *args, **kwargs):
""" Calls a function or evaluates an expression after a specified number of milliseconds """
import time
time.sleep(t / 1000) # TODO - still blocks
function()
return
# TODO - clearTimeout.
@staticmethod
def clearTimeout(job):
# job.stop()
pass
@staticmethod
def clearInterval(job):
job.stop()
@staticmethod
def setInterval(function, time, *args, **kwargs):
interval_ID = SetInterval(function, time, *args, **kwargs)
return interval_ID.job
@staticmethod
def _do_request(url, f=None, **kwargs):
# private - don't use directly. use one of the fetch methods
try:
# r = requests.get(url, timeout=3)
from requests import Request, Session
method = "GET"
if "method" in kwargs:
method = kwargs["method"]
if "callback_function" in kwargs:
del kwargs["callback_function"]
if "error_handler" in kwargs:
del kwargs["error_handler"]
s = Session()
req = Request(method, url)
prepped = s.prepare_request(req)
r = s.send(prepped, **kwargs)
# print(r.status_code)
s.close()
if f is not None and type(f) is FetchedSet:
f.results.append(r)
return r
except Exception as e:
print(f'Request Failed for URL: {url}', e)
return None
@staticmethod
def fetch(url: str, **kwargs):
# undocumented - warning. use at own risk
# note - kinda pointless atm. just use requests directly and you wont have to muck about with a Promise
if type(url) is not str:
raise ValueError('fetch takes a single url string. use fetch_set, fetch_threaded or fetch_pooled')
f = Promise()
        r = window._do_request(url, f, **kwargs)
return f.resolve(r)
@staticmethod
def fetch_set(urls: list, callback_function=None, error_handler=None, **kwargs):
# undocumented - warning. use at own risk
# note - still blocks. just gets all before continuing
# problems - all urls can only have 1 associated callback, error and set of kwargs
if type(urls) is str:
urls = [urls] # leniency
f = FetchedSet()
for url in urls:
r = window.fetch(url, **kwargs).then(callback_function)
f.results.append(r.data)
return f
@staticmethod
def fetch_threaded(urls: list, callback_function=None, error_handler=None, **kwargs):
# undocumented - warning. use at own risk
# note - still blocks. just gets all before continuing using threads
# problems - all urls can only have 1 associated callback, error and set of kwargs
if type(urls) is str:
urls = [urls] # leniency
f = FetchedSet()
        jobs = []
        for url in urls:
            thread = threading.Thread(target=window._do_request, args=(url, f), kwargs=kwargs)
            thread.daemon = True
            jobs.append(thread)
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
# f = FetchedSet()
return f
@staticmethod
def fetch_pooled(urls: list, callback_function=None, error_handler=None, **kwargs):
# undocumented - warning. use at own risk
# note - still blocks. just gets all before continuing using a pool
# problems - all urls can only have 1 associated callback, error and set of kwargs
if type(urls) is str:
urls = [urls] # leniency
f = FetchedSet()
def _do_request_wrapper(obj):
url = obj['url']
f = obj['f']
kwargs = obj['k']
kwargs['callback_function'] = obj['c']
kwargs['error_handler'] = obj['e']
window._do_request(url, f, **kwargs)
jobs = []
p = Pool()
urls = [{'url': url, 'f': f, 'c': callback_function, 'e': error_handler, 'k': kwargs} for url in urls]
results = p.map(_do_request_wrapper, urls)
p.close()
p.join()
return f
# def fetch_aysnc( urls: list, options={}, type="async" ):
# TODO - a version using async/await
# @staticmethod
# @getter
# def navigator():
""" Returns the Navigator object for the window (See Navigator object) """
# return
@staticmethod
def btoa(dataString):
""" Encodes a string in base-64 """
import base64
dataBytes = dataString.encode("utf-8")
encoded = base64.b64encode(dataBytes)
return encoded
@staticmethod
def atob(dataString):
""" Decodes a base-64 encoded string """
import base64
return base64.b64decode(dataString).decode()
@staticmethod
def requestAnimationFrame(callback):
"""[requests a frame of an animation]
Args:
callback (callable): [the callback function]
        Returns:
            the return value of the callback, which receives the current performance timestamp
        """
        perf = performance.now()
        return callback(perf)
# WINDOW
# localStorage Allows to save key/value pairs in a web browser. Stores the data with no expiration date Window
# blur() Removes focus from an element Element, Window
# clearTimeout() Clears a timer set with setTimeout() Window
# closed Returns a Boolean value indicating whether a window has been closed or not Window
# close() Closes the output stream previously opened with document.open() Document, Window
# confirm() Displays a dialog box with a message and an OK and a Cancel button Window
# defaultStatus Sets or returns the default text in the statusbar of a window Window
# defaultView Returns the window object associated with a document, or null if none is available. Document
# document Returns the Document object for the window (See Document object) Window
# focus() Gives focus to an element Element, Window
# frameElement Returns the <iframe> element in which the current window is inserted Window
# getComputedStyle() Gets the current computed CSS styles applied to an element Window
# getSelection() Returns a Selection object representing the range of text selected by the user Window
# history Returns the History object for the window (See History object) Window
# innerHeight Returns the height of the window's content area (viewport) including scrollbars Window
# innerWidth Returns the width of a window's content area (viewport) including scrollbars Window
# location Returns the Location object for the window (See Location object) Window
# matchMedia() Returns a MediaQueryList object representing the specified CSS media query string Window
# moveBy() Moves a window relative to its current position Window
# moveTo() Moves a window to the specified position Window
# name Sets or returns an error name Error, Attribute, Window
# navigator Returns the Navigator object for the window (See Navigator object) Window
# onpopstate The event occurs when the window's history changes PopStateEvent
# open() Opens an HTML output stream to collect output from document.write() Document, Window
# opener Returns a reference to the window that created the window Window
# outerHeight Returns the height of the browser window, including toolbars/scrollbars Window
# outerWidth Returns the width of the browser window, including toolbars/scrollbars Window
# pageXOffset Returns the pixels the current document has been scrolled (horizontally) from the upper left corner of the window Window
# pageYOffset Returns the pixels the current document has been scrolled (vertically) from the upper left corner of the window Window
# parent Returns the parent window of the current window Window
# _print() Prints the content of the current window Window
# resizeBy() Resizes the window by the specified pixels Window
# resizeTo() Resizes the window to the specified width and height Window
# screen Returns the Screen object for the window (See Screen object) Window
# screenLeft Returns the horizontal coordinate of the window relative to the screen Window
# screenTop Returns the vertical coordinate of the window relative to the screen Window
# scroll() Deprecated. This method has been replaced by the scrollTo() method. Window
# scrollBy() Scrolls the document by the specified number of pixels Window
# scrollIntoView() Scrolls the specified element into the visible area of the browser window Element
# scrollTo() Scrolls the document to the specified coordinates Window
# scrollX An alias of pageXOffset Window
# scrollY An alias of pageYOffset Window
# sessionStorage Allows to save key/value pairs in a web browser. Stores the data for one session Window
# setTimeout() Calls a function or evaluates an expression after a specified number of milliseconds Window
# stop() Stops the window from loading Window
# status Sets or returns the text in the statusbar of a window Window
# top Returns the topmost browser window Window
# view Returns a reference to the Window object where the event occurred UiEvent
window = Window
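# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Window
# class aliased as `window` above.
def _example_window_usage():
    assert window.atob(window.btoa('hello')) == 'hello'
    job = window.setInterval(print, 1000, 'tick')   # prints 'tick' roughly every second
    window.clearInterval(job)                       # stops the timer thread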
# class null():
# def __str__(self):
# return ''
# def __repr__(self):
# return None
class Array(object):
""" javascript array """
@staticmethod
def from_(object): # TODO - test
""" Creates a new Array instance from an array-like or iterable object. """
return Array(object)
@staticmethod
def isArray(object): # TODO - test
""" Returns true if the specified object is an Array instance. """
return type(object) is Array or type(object) is list or type(object) is tuple
@staticmethod
def of(*args): # TODO - test
""" Creates a new Array instance with a variable number of arguments, regardless of number or type of the arguments. """
return Array(args)
def __init__(self, *args):
"""[An Array that behaves like a js array]
"""
# casting
if len(args) == 1:
if isinstance(args[0], list):
self.args = args[0]
return
elif isinstance(args[0], int):
# self.args = [None] * args[0]
# self.args = [null()] * args[0]
self.args = [""] * args[0]
return
self.args = list(args)
def __getitem__(self, index):
return self.args[index]
def __setitem__(self, index, value):
self.args[index] = value
def __add__(self, value):
if isinstance(value, int):
raise ValueError('int not supported')
if isinstance(value, Array):
self.args = self.args + value.args
if isinstance(value, list):
self.args = self.args + value
return self.args
def __len__(self):
return len(self.args)
def __eq__(self, other):
return isinstance(other, Array) and \
self.args == other.args
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self.args)
def __iter__(self):
for i in self.args:
yield i
def __sub__(self, value):
if isinstance(value, int):
raise ValueError('int not supported')
if isinstance(value, Array):
self.args = self.args - value.args
if isinstance(value, list):
self.args = self.args - value
return self.args
def toString(self):
''' Converts an array to a string, and returns the result '''
return str(self.args) # TODO - check what js does
@property
def length(self):
""" Sets or returns the number of elements in an array """
return len(self.args)
def concat(self, *args):
"""[Joins two or more arrays, and returns a copy of the joined arrays]
Returns:
[list]: [returns a copy of the joined arrays]
"""
for a in args:
self.args += a
return self.args
def fill(self):
""" Fill the elements in an array with a static value """
raise NotImplementedError
def includes(self, value):
""" Check if an array contains the specified element """
if value in self.args:
return True
else:
return False
def indexOf(self, value):
""" Search the array for an element and returns its position """
# for count, each in enumerate(self.args):
# if each == value:
# return count
try:
return self.args.index(value)
except ValueError:
return -1
except Exception as e:
# print(e)
return -1
@staticmethod
    def isArray(thing):
"""[Checks whether an object is an array]
Args:
thing ([type]): [thing to check]
Returns:
[bool]: [True if the object is list, tuple or Array]
"""
if isinstance(thing, (list, tuple, Array)):
return True
else:
return False
def join(self, value):
""" Joins all elements of an array into a string """
# TODO - get passed param names
return value.join([str(x) for x in self.args])
def lastIndexOf(self, value):
""" Search the array for an element, starting at the end, and returns its position """
try:
return len(self.args) - self.args[::-1].index(value) - 1
except Exception as e:
# print(e)
return None
def pop(self):
""" Removes the last element of an array, and returns that element """
# item = self.args[len(self.args)-1]
# del self.args[len(self.args)-1]
return self.args.pop()
def push(self, value):
""" Adds new elements to the end of an array, and returns the new length """
self.args.append(value)
return len(self.args)
def reverse(self):
""" Reverses the order of the elements in an array """
self.args = self.args[::-1]
return self.args
def slice(self, start=0, stop=None, step=1):
"""[Selects a part of an array, and returns the new array]
Args:
start ([int]): [index to slice from]
stop ([int], optional): [index to slice to]. Defaults to end of the array.
step (int, optional): [description]. Defaults to 1.
Returns:
[type]: [new array]
"""
if stop is None:
stop = len(self.args)
return self.args[slice(start, stop, step)]
def splice(self, start, delete_count=None, *items):
""" Selects a part of an array, and returns the new array """
if delete_count is None:
delete_count = len(self.args) - start
total = start + delete_count
removed = self.args[start:total]
self.args[start:total] = items
return removed
# return self.args
def unshift(self, *args):
"""[Adds new elements to the beginning of an array, and returns the new length]
Returns:
[int]: [the length of the array]
"""
for i in reversed(args):
self.args.insert(0, i)
return len(self.args)
def shift(self):
"""[removes the first element from an array and returns that removed element]
Returns:
[type]: [the removed array element]
"""
item = self.args[0]
del self.args[0]
return item
def map(self, func):
"""[Creates a new array with the result of calling a function for each array element]
Args:
func ([type]): [a function to call on each array element]
Returns:
[list]: [a new array]
"""
# print(func)
return [func(value) for value in self.args]
# return map(self.args, func)
def some(self, func):
""" Checks if any of the elements in an array pass a test """
return any(func(value) for value in self.args)
    def sort(self, func=None):  # , *args, **kwargs):
        """ Sorts the elements of an array """
        if func is not None:
            # treat func as a js-style comparator and sort in place
            from functools import cmp_to_key
            self.args.sort(key=cmp_to_key(func))
            return self.args
        self.args.sort()
        return self.args
    def reduce(self, func, value=None):
        """ Reduce the values of an array to a single value (going left-to-right) """
        from functools import reduce
        try:
            if value is None:
                return reduce(func, self.args)
            return reduce(func, self.args, value)
        except TypeError:
            return -1
    def reduceRight(self, func, value=None):
        """ Reduce the values of an array to a single value (going right-to-left) """
        from functools import reduce
        # reduces the array in reverse order
        try:
            if value is None:
                return reduce(func, list(reversed(self.args)))
            return reduce(func, list(reversed(self.args)), value)
        except TypeError:
            return -1
def filter(self, func):
"""
Creates a new array with every element in an array that pass a test
i.e. even_numbers = someArr.filter( lambda x: x % 2 == 0 )
"""
# written by .ai (https://6b.eleuther.ai/)
# filtered = []
# for value in self.args:
# if func(value):
# filtered.append(value)
# return filtered
return list(filter(func, self.args))
def find(self, func):
""" Returns the value of the first element in an array that pass a test """
for each in self.args:
if func(each):
return each
    def findIndex(self, value):
        """ Returns the index of the first element in an array that passes a test (or equals the given value) """
        for i, item in enumerate(self.args):
            if callable(value):
                if value(item):
                    return i
            elif item == value:
                return i
        return -1
def forEach(self, func):
""" Calls a function for each array element """
# written by .ai (https://6b.eleuther.ai/)
for value in self.args:
func(value)
# def from(self, obj):
# """[Creates an array from an object]
# Args:
# obj ([type]): [description]
# """
# return [obj]
# return [obj]
def keys(self):
""" Returns a Array Iteration Object, containing the keys of the original array """
for i in self.args:
yield i
    def copyWithin(self, target, start=0, end=None):
        """ Copies a sequence of elements within the array to the target index, from start to end """
        if end is None:
            end = len(self.args)
        chunk = self.args[start:end]
        for offset, value in enumerate(chunk):
            if target + offset < len(self.args):
                self.args[target + offset] = value
        return self.args
def entries(self):
"""[Returns a key/value pair Array Iteration Object]
Yields:
[type]: [key/value pair]
"""
        for i, value in enumerate(self.args):
            yield [i, value]
def every(self, func):
"""[Checks if every element in an array pass a test]
Args:
func ([type]): [test function]
Returns:
[bool]: [if every array elemnt passed the test]
"""
return all(func(value) for value in self.args)
def at(self, index: int):
"""[takes an integer value and returns the item at that index,
allowing for positive and negative integers.
Negative integers count back from the last item in the array.]
Args:
index ([type]): [position of item]
Returns:
[type]: [item at the given position]
"""
return self.args[index]
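# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Array
# class defined above.
def _example_array_usage():
    a = Array(1, 2, 3)
    assert a.push(4) == 4                           # push returns the new length
    assert a.indexOf(3) == 2
    assert a.join('-') == '1-2-3-4'
    assert a.slice(1, 3) == [2, 3]
    assert a.map(lambda v: v * 10) == [10, 20, 30, 40]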
class Set():
def __init__(self, *args):
"""[The Set object lets you store unique values of any type, whether primitive values or object references.
TODO - will need to store dictionaries unlike a python set
https://stackoverflow.com/questions/34097959/add-a-dictionary-to-a-set-with-union
]
"""
self.args = set(args)
def __iter__(self):
return iter(self.args)
def __len__(self):
return len(self.args)
def __contains__(self, item):
return item in self.args
def __repr__(self):
return repr(self.args)
def __str__(self):
return str(self.args)
@property
def species(self):
""" The constructor function that is used to create derived objects. """
# return self.args
raise NotImplementedError
@property
def size(self):
""" Returns the number of values in the Set object. """
return len(self.args)
def add(self, value):
""" Appends value to the Set object. Returns the Set object with added value. """
# print(type(self.args), value)
self.args.add(value)
return self.args
def clear(self):
""" Removes all elements from the Set object. """
self.args.clear()
def delete(self, value):
""" Removes the element associated to the value
returns a boolean asserting whether an element was successfully removed or not. """
return self.args.remove(value)
def has(self, value):
""" Returns a boolean asserting whether an element is present with the given value in the Set object or not. """
return value in self.args
def contains(self, value):
""" Returns a boolean asserting whether an element is present with the given value in the Set object or not. """
return value in self.args
# Set.prototype[@@iterator]()
# Returns a new iterator object that yields the values for each element in the Set object in insertion order.
def values(self):
""" Returns a new iterator object that yields the values for each element in the Set object in insertion order. """
return iter(self.args)
# def keys(self):
# """ An alias for values """ #?
# return self.values()
def entries(self):
""" Returns a new iterator object that contains an array of [value, value] for each element in the Set object, in insertion order. """
        return iter([[i, i] for i in self.args])
# This is similar to the Map object, so that each entry's key is the same as its value for a Set.
def forEach(self, callbackFn, thisArg=None):
""" Calls callbackFn once for each value present in the Set object, in insertion order.
If a thisArg parameter is provided, it will be used as the this value for each invocation of callbackFn.
"""
for i in self.args:
callbackFn(i, thisArg)
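# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Set
# class defined above.
def _example_set_usage():
    s = Set(1, 2, 3)
    s.add(4)
    assert s.has(2) is True
    assert s.size == 4
    assert s.contains(5) is False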
class Navigator(object):
""" navigator """
# Determines whether cookies are enabled in the browser
cookieEnabled = False
# Determines whether the browser is online
onLine = False
# Returns the name of the browser Navigator
appName = "domonic"
def __init__(self, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
pass
# @property
# def appVersion():
""" Returns the version information of the browser """
# from domonic import __version__
# return __version__
# @property
# def language():
""" Returns the language of the browser Navigator """
# import locale
# return locale.getdefaultlocale()
# platform Returns for which platform the browser is compiled Navigator
# product Returns the engine name of the browser Navigator
# userAgent Returns the user-agent header sent by the browser to the server Navigator
# geolocation Returns a Geolocation object that can be used to locate the user's position Navigator
# appCodeName Returns the code name of the browser Navigator
class Number(float):
""" javascript Number methods """
# print(sys.float_info)
MAX_VALUE = list(sys.float_info)[0]
MIN_VALUE = 5E-324 # CHANGE no longer > list(sys.float_info)[3]
NEGATIVE_INFINITY = float("inf") #: Represents negative infinity (returned on overflow) Number
POSITIVE_INFINITY = float("-inf") #: Represents infinity (returned on overflow) Number
# prototype Allows you to add properties and methods to an object Number
def __init__(self, x="", *args, **kwargs):
self.x = Global.Number(x)
def __add__(self, other):
return self.x + other
def __sub__(self, other):
return self.x - other
def __mul__(self, other):
return self.x * other
def __div__(self, other):
return self.x / other
def __mod__(self, other):
return self.x % other
def __pow__(self, other):
return self.x ** other
def __neg__(self):
return -self.x
def __pos__(self):
return +self.x
def __abs__(self):
return abs(self.x)
def __invert__(self):
return ~self.x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __and__(self, other):
return self.x & other
def __or__(self, other):
return self.x | other
def __xor__(self, other):
return self.x ^ other
def __lshift__(self, other):
return self.x << other
def __rshift__(self, other):
return self.x >> other
def __iadd__(self, other):
return self.x + other
def __isub__(self, other):
return self.x - other
def __imul__(self, other):
return self.x * other
def __idiv__(self, other):
return self.x / other
def __imod__(self, other):
return self.x % other
def __ipow__(self, other):
return self.x ** other
def __ilshift__(self, other):
return self.x << other
def __irshift__(self, other):
return self.x >> other
def __iand__(self, other):
return self.x & other
def __ior__(self, other):
return self.x | other
def __ixor__(self, other):
return self.x ^ other
def __floordiv__(self, other):
return self.x // other
def __rfloordiv__(self, other):
return other // self.x
def __ifloordiv__(self, other):
return other // self.x
def __truediv__(self, other):
return self.x / other
def __rtruediv__(self, other):
return other / self.x
def __itruediv__(self, other):
return other / self.x
def __rmod__(self, other):
return other % self.x
def isInteger(self):
""" Checks whether a value is an integer """
return (type(self.x) == int)
def isSafeInteger(self):
""" Checks whether a value is a safe integer """
raise NotImplementedError
def toExponential(self, num=None):
""" Converts a number into an exponential notation """
if num is not None:
exp = '{:e}'.format(Number(Number(self.x).toFixed(num)))
else:
exp = '{:e}'.format(self.x)
if 'e' in str(self.x):
exp = str(self.x) # python already converts.
n = exp.split('e')[0].rstrip("0")
e = exp.split('e')[1].replace('00', '0')
if n == "0.":
n = "0"
if int(e) != 0:
if int(e) < 10 and int(e) > -10: # TODO - not correct. lazy way to strip left 0s only
e = e.replace('0', '')
# print( "AND:", n, "e" , e )
if n.endswith('.'):
n = n.strip('.')
return n + "e" + e
def toFixed(self, digits: int):
"""[formats a number using fixed-point notation.]
Args:
digits ([int]): [The number of digits to appear after the decimal point
Returns:
[str]: [A string representing the given number using fixed-point notation.]
"""
# print("DIGIT!", digits)
if digits < 0:
digits = 0
fstring = "{:." + str(digits) + "f}"
return fstring.format(round(self.x, digits))
def toPrecision(self, precision):
"""[returns a string representing the Number object to the specified precision.]
Args:
precision ([int]): [An integer specifying the number of significant digits.]
Returns:
[str]: [A string representing a Number object in fixed-point
or exponential notation rounded to precision significant digits]
"""
precision = int(precision)
# return str(math.pow(self.x, precision))
# raise NotImplementedError
return str(round(self.x, precision))
    def toString(self, base: int = None):
"""[returns a string representing the specified Number object.]
Args:
base (int): [An integer in the range 2 through 36 specifying the base to use for representing numeric values.]
Returns:
[str]: [a string representing the specified Number object]
"""
if base is None:
return str(self.x)
import string
digs = string.digits + string.ascii_letters
if self.x < 0:
sign = -1
elif self.x == 0:
return digs[0]
else:
sign = 1
self.x *= sign
digits = []
while self.x:
digits.append(digs[int(self.x % base)])
self.x = int(self.x / base)
if sign < 0:
digits.append('-')
digits.reverse()
return ''.join(digits)
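# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the Number
# class defined above.
def _example_number_usage():
    assert Number('3.14159').toFixed(2) == '3.14'
    assert Number(255).toString(16) == 'ff'
    assert Number('10') + 5 == 15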
class String(object):
""" javascript String methods """
@staticmethod
def fromCodePoint(codePoint: int):
""" Converts a Unicode code point into a string """
return chr(codePoint)
@staticmethod
def toCodePoint(char: str):
""" Converts a Unicode string into a code point """
return ord(char)
@staticmethod
def raw(string):
""" Returns the string as-is """
import re
return re.escape(string)
# @staticmethod
# def fromCharCode(code: int):
# """ Converts a Unicode code point into a string """
# return chr(code)
@staticmethod
def toCharCode(char: str):
""" Converts a Unicode string into a code point """
return ord(char)
def __init__(self, x="", *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
self.x = str(x)
def __str__(self):
return self.x
# def __repr__(self):
# return self.x
def __getitem__(self, item):
# print(item)
return self.x[item]
def __add__(self, other):
return self.x + other
def __radd__(self, other):
return self.x + other
def __iadd__(self, other):
return self.x + other
def __sub__(self, other):
return self.x - other
def __rsub__(self, other):
return other - self.x
def __isub__(self, other):
return self.x - other
def __mul__(self, other):
return self.x * int(other)
def __rmul__(self, other):
return self.x * int(other)
def __imul__(self, other):
return self.x * int(other)
def split(self, expr):
"""[can split a string based on a regex]
Args:
expr ([str]): [valid regex or string to split on]
Returns:
[list]: [list of str]
"""
# if isinstance( expr, RegExp)
import re
# print( '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.', type(expr) )
is_regex = False
try:
re.compile(expr)
is_regex = True
except re.error:
is_regex = False
if is_regex:
return re.split(expr, self.x)
else:
return self.x.split(expr)
def concat(self, *args, seperator=""):
"""[concatenates the string arguments to the calling string and returns a new string.]
Args:
seperator (str, optional): []. Defaults to "".
Returns:
[type]: [A new string containing the combined text of the strings provided.]
"""
args = list(args)
args.insert(0, self.x)
return seperator.join(args)
# @staticmethod
def charCodeAt(self, index: int):
""" Returns the Unicode of the character at the specified index """
return ord(self.x[index])
# @staticmethod
def fromCharCode(self, *codes):
""" returns a string created from the specified sequence of UTF-16 code units """
return "".join([str(chr(x)) for x in codes])
@property
def length(self):
return len(self.x)
def repeat(self, count: int):
""" Returns a new string with a specified number of copies of an existing string """
return self.x * count
def startsWith(self, x: str, start: int = None, end: int = None):
""" Checks whether a string begins with specified characters """
if start is None:
start = 0
if end is None:
end = len(x)
# print(self.x.startswith(x, start, end))
return self.x.startswith(x, start, end)
def substring(self, start: int, end: int = None):
""" Extracts the characters from a string, between two specified indices """
if start < 0:
start = 0
if end is None:
end = len(self.x)
return self.x[start:end]
def endsWith(self, x: str, start: int = None, end: int = None):
""" Checks whether a string ends with specified string/characters """
if start is None:
start = 0
if end is None:
end = len(x)
return self.x.endswith(x, start, end)
def toLowerCase(self):
""" Converts a string to lowercase letters """
return self.x.lower()
def toUpperCase(self):
""" Converts a string to uppercase letters """
return self.x.upper()
def slice(self, start: int = 0, end: int = None):
""" Selects a part of an string, and returns the new string """
if end is None:
end = len(self.x)
return self.x[start:end]
def trim(self):
""" Removes whitespace from both ends of a string """
return self.x.strip()
def charAt(self, index: int):
"""[Returns the character at the specified index (position)]
Args:
index (int): [index position]
Returns:
[str]: [character]
"""
return self.x[index]
def replace(self, old: str, new: str):
"""
Searches a string for a specified value, or a regular expression,
and returns a new string where the specified values are replaced.
only replaces first one.
"""
if callable(new):
return new(self.x, old)
else:
return self.x.replace(old, new, 1)
# re.sub(r"regepx", "old", "new") # TODO - js one also takes a regex
def replaceAll(self, old: str, new: str):
"""[returns a new string where the specified values are replaced. ES2021]
Args:
old ([str]): [word to remove]
new ([str]): [word to replace it with]
Returns:
[str]: [new string with all occurences of old word replaced]
"""
return self.x.replace(old, new)
# def localeCompare():
# """ Compares two strings in the current locale """
# pass
def substr(self, start: int = 0, end: int = None):
""" Extracts the characters from a string, beginning at a specified start position,
and through the specified number of character """
if end is None:
end = len(self.x)
return self.x[start:start + end]
def toLocaleLowerCase(self):
""" Converts a string to lowercase letters, according to the host's locale """
# locale.setlocale()
return self.x.lower()
def toLocaleUpperCase(self):
""" Converts a string to uppercase letters, according to the host's locale """
# locale.setlocale()
return self.x.upper()
def indexOf(self, searchValue: str, fromIndex: int = 0):
"""[returns the index within the calling String object of the first occurrence of the specified value,
starting the search at fromIndex ]
Args:
searchValue (str): [The string value to search for.]
fromIndex (int): [An integer representing the index at which to start the search]
Returns:
[type]: [The index of the first occurrence of searchValue, or -1 if not found.]
"""
try:
return self.x.index(searchValue, fromIndex)
except ValueError:
return -1
def codePointAt(self, index: int):
""" Returns the Unicode code point at the specified index """
return ord(self.x[index])
def padEnd(self, length: int, padChar: str = " "):
""" Pads the end of a string with a specified character """
return str(self.x + padChar * (length - len(self.x)))
def padStart(self, length: int, padChar: str = " "):
""" Pads the start of a string with a specified character """
return padChar * (length - len(self.x)) + self.x
def localeCompare(self, comparisonString: str, locale: str = None, *args):
""" method returns a number indicating whether a reference string comes before,
or after, or is the same as the given string in sort order """
# if locale is None:
# locale = self.locale
# return locale.strcoll(self.x, comparisonString, *args)
# pass
raise NotImplementedError
def trimStart(self, length: int):
""" Removes whitespace from the start of a string """
return self.x.lstrip()
def trimEnd(self, length: int):
""" Removes whitespace from the end of a string """
return self.x.rstrip()
def includes(self, searchValue: str, position: int = 0):
"""[returns true if the specified string is found within the calling String object,]
Args:
searchValue (str): [The string value to search for.]
position (int, optional): [the position to search from]. Defaults to 0.
Returns:
[type]: [a boolean value indicating whether the search value was found.]
"""
return searchValue in self.x[position:]
def search(self, searchValue: str, position: int = 0):
"""[returns true if the specified string is found within the calling String object,]
starting at the specified position.
Args:
searchValue (str): [The string value to search for.]
position (int, optional): [the position to search from]. Defaults to 0.
Returns:
[type]: [a boolean value indicating whether the search value was found.]
"""
return searchValue in self.x[position:]
    def matchAll(self, pattern: str):
        """
        Searches the string against a regular expression
        and returns an iterator over all of the matches.
        """
        return re.finditer(pattern, self.x)
    def match(self, pattern: str):
        """
        Searches the string against a regular expression
        and returns the first match, or None if there is no match.
        """
        return re.match(pattern, self.x)
    def compile(self, pattern: str):
        """
        Compiles the given pattern into a regular expression object.
        """
        return re.compile(pattern)
    def lastIndexOf(self, searchValue: str, fromIndex: int = 0):
        """
        returns the index of the last occurrence of the specified value,
        searching from fromIndex, or -1 if the value is not found
        """
        try:
            return self.x.rindex(searchValue, fromIndex)
        except ValueError:
            return -1
# def test(self, pattern: str):? was this on string?
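# Illustrative usage sketch (added example, not part of the original source).
# A private helper that does not run at import time; it only assumes the String
# class defined above.
def _example_string_usage():
    s = String('hello world')
    assert s.toUpperCase() == 'HELLO WORLD'
    assert s.charAt(0) == 'h'
    assert s.includes('world') is True
    assert s.substring(0, 5) == 'hello'
    assert s.length == 11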
class RegExp():
def __init__(self, expression):
self.expression = expression
# self.flag #: A string that contains the flags of the RegExp object.
# self.dotAll #: Whether . matches newlines or not.
# self.global # Whether to test the regular expression against all possible matches in a string, or only against the first.
# self.hasIndices # Whether the regular expression result exposes the start and end indices of captured substrings.
# self.ignoreCase # Whether to ignore case while attempting a match in a string.
# self.multiline # Whether or not to search in strings across multiple lines.
# self.source # The text of the pattern.
# self.sticky # Whether or not the search is sticky.
# self.unicode # Whether or not Unicode features are enabled.
# self.lastIndex # The index at which to start the next match.
def compile(self):
""" (Re-)compiles a regular expression during execution of a script. """
pass
def exec(self, s: str):
""" Executes a search for a match in its string parameter. """
# print("exec:", self.expression, s)
m = re.search(self.expression, s)
# print(m)
if (m):
return [s for s in m.groups()]
def test(self, s: str):
"""[Tests for a match in its string parameter.]
Args:
s (str): [a string to match]
Returns:
[bool]: [True if match else False]
"""
m = re.match(self.expression, s)
# print(m)
if (m):
return True
else:
return False
    def toString(self):
        """ Returns a string representing the regular expression. Overrides the Object.prototype.toString() method. """
        return str(self.expression)
    def __str__(self):
        """ Returns a string representing the regular expression. Overrides the Object.prototype.toString() method. """
        return str(self.expression)
# def [@@match]()
# Performs match to given string and returns match result.
# def [@@matchAll]()
# Returns all matches of the regular expression against a string.
# def [@@replace]()
# Replaces matches in given string with new substring.
# def [@@search]()
# Searches the match in given string and returns the index the pattern found in the string.
# def [@@split]()
# Splits given string into an array by separating the string
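# Illustrative usage of RegExp (a sketch; the output comments are approximate):
#
#   r = RegExp(r'(\d+)')
#   r.test('abc123')   # -> False  (test() uses re.match, so the pattern must match at the start)
#   r.test('123abc')   # -> True
#   r.exec('abc123')   # -> ['123']  (exec() uses re.search and returns the captured groups)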
# https://developer.mozilla.org/en-US/docs/Web/API/URL
class URL(object):
""" a-tag extends from URL """
def __update__(self):
# print( "update URL:", type(self), self )
try:
# make obj with all old props
new = {}
new['protocol'] = self.url.scheme
new['hostname'] = self.url.hostname
new['href'] = self.url.geturl()
new['port'] = self.url.port
new['host'] = '' # self.url.hostname
new['pathname'] = self.url.path
new['hash'] = '' # self.url.hash
new['search'] = '' # self.url.hash
# update it with all the new ones
new = {}
new['protocol'] = self.protocol
new['hostname'] = self.hostname
new['href'] = self.href
new['port'] = self.port
new['host'] = self.host
new['pathname'] = self.pathname
new['hash'] = self.hash # self.hash
new['search'] = self.search # self.url.query
new['_searchParams'] = self._searchParams # URLSearchParams(self.url.query)
# NOTE - rebuild happening here
self.url = urllib.parse.urlsplit(
new['protocol'] + "://" + new['host'] + new['pathname'] + new['hash'] + new['search'])
self.href = self.url.geturl()
except Exception: # as e:
# print('fails on props called by init as they dont exist yet')
# print(e)
pass
def __init__(self, url: str = "", *args, **kwargs): # TODO - relative to
"""URL
builds a url
Args:
url (str): a url
"""
self.url = urllib.parse.urlsplit(url)
self.href = url # self.url.geturl()
self.protocol = self.url.scheme
self.hostname = self.url.hostname
self.port = self.url.port
self.host = self.url.hostname
self.pathname = self.url.path
self.hash = ''
self.search = self.url.query
self._searchParams = URLSearchParams(self.url.query)
@property
def searchParams(self):
return self._searchParams.toString()
def toString(self):
return str(self.href)
# def toJson
# @property
# def href(self):
# TODO - check js vs tag. does js version remove query?. if so detect self.
# return self.href
# @href.setter
# def href(self, href:str):
# self.url = href
# self.href = href
@property
def protocol(self):
return self.__protocol
@protocol.setter
def protocol(self, p: str):
self.__protocol = p
# if self.ready : self.__update__() # TODO - this instead of silent err?
self.__update__()
@property
def hostname(self):
return self.__hostname
@hostname.setter
def hostname(self, h: str):
if h is None:
return
if ":" in h:
h = h.split(':')[0]
self.__hostname = h
self.__update__()
@property
def port(self):
return self.__port
@port.setter
def port(self, p: str):
self.__port = p
self.__update__()
@property
def host(self):
if self.port is not None:
return self.hostname + ":" + str(self.port)
else:
return self.hostname
@host.setter
def host(self, h: str):
if h is None:
return
p = self.port
if ":" in h:
p = int(h.split(':')[1])
h = h.split(':')[0]
self.__host = h
self.hostname = h
self.port = p
self.__update__()
@property
def pathname(self):
return self.__pathname
@pathname.setter
def pathname(self, p: str):
self.__pathname = p
self.__update__()
@property
def hash(self):
"""" hash Sets or returns the anchor part (#) of a URL """
if '#' in self.href:
return '#' + self.href.split('#')[1]
# return ''
return self.__hash
@hash.setter
def hash(self, h: str):
self.__hash = h
self.__update__()
# @property
# def origin(self):
'''# origin Returns the protocol, hostname and port number of a URL Location'''
def __str__(self):
return str(self.href)
# NOTE - node -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
# @staticmethod
# def domainToASCII(domain: str):
# """[It returns the Punycode ASCII serialization of the domain.
# If domain is an invalid domain, the empty string is returned.]
# Args:
# domain (str): [description]
# """
# pass
# @staticmethod
# def domainToUnicode(domain: str):
# """[returns the Unicode serialization of the domain.
# If the domain is invalid, the empty string is returned]
# Args:
# domain (str): [description]
# """
# pass
# @staticmethod
# def fileURLToPath(url: str):
# """[summary]
# Args:
# url (str): [description]
# """
# pass
# @staticmethod
# def format(URL, options):
# """[summary]
# Args:
# URL ([type]): [description]
# options ([type]): [description]
# """
# pass
# @staticmethod
# def pathToFileURL(path: str):
# """[summary]
# Args:
# path (str): [description]
# """
# pass
# @staticmethod
# def urlToHttpOptions(url: str):
# """[summary]
# Args:
# url (str): [description]
# """
# pass
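# Illustrative usage of URL (a sketch; only reads the properties parsed by urlsplit):
#
#   u = URL("https://example.com:8000/path?q=1")
#   u.protocol   # -> 'https'
#   u.hostname   # -> 'example.com'
#   u.port       # -> 8000
#   u.pathname   # -> '/path'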
class URLSearchParams:
"""[utility methods to work with the query string of a URL]
created with help of https://6b.eleuther.ai/
"""
def __init__(self, paramString): # , **paramsObj):
"""[Returns a URLSearchParams object instance.]
Args:
paramString ([type]): [ i.e. q=URLUtils.searchParams&topic=api]
"""
# TODO - escape
# import ast
# TODO - dont think i can do this cant urls params have duplicate keys?
# self.params = ast.literal_eval(paramString)
if isinstance(paramString, str):
if paramString.startswith('?'):
paramString = paramString[1:len(paramString)]
import urllib.parse
self.params = urllib.parse.parse_qs(paramString)
elif hasattr(paramString, '__iter__'):
self.params = [item for sublist in paramString for item in sublist]
elif isinstance(paramString, dict):
self.params = {key: item for key, item in paramString.items()}  # items(), not the Python 2 iteritems()
else:
raise TypeError("Malformed paramString. Must be a string or a dict with dict like items. Got: %s" % paramString)
def __iter__(self):
for attr in self.params.items(): # dir(self.params.items()):
# if not attr.startswith("__"):
yield attr
def append(self, key, value):
""" Appends a specified key/value pair as a new search parameter """
# create the list for a new key so appending works even when the key is absent
self.params.setdefault(key, []).append(value)
def delete(self, key):
""" Deletes the given search parameter, and its associated value, from the list of all search parameters. """
del self.params[key]
def has(self, key):
""" Returns a Boolean indicating if such a given parameter exists. """
return key in self.params
def entries(self):
""" Returns an iterator allowing iteration through all key/value pairs contained in this object. """
return self.params.items()
def forEach(self, func):
""" Allows iteration through all values contained in this object via a callback function. """
for key, value in self.params.items():
func(key, value)
def keys(self):
""" Returns an iterator allowing iteration through all keys of the key/value pairs contained in this object. """
return self.params.keys()
def get(self, key):
""" Returns the first value associated with the given search parameter. """
try:
return self.params.get(key, None)[0]
except Exception:
return None
def sort(self):
""" Sorts all key/value pairs, if any, by their keys. """
self.params = dict(sorted(self.params.items()))  # dicts have no .sort(); rebuild in key order
def values(self):
""" Returns an iterator allowing iteration through all values of the key/value pairs contained in this object. """
return self.params.values()
def toString(self):
""" Returns a string containing a query string suitable for use in a URL. """
# return '&'.join([str(x) for x in self.params])
return urllib.parse.urlencode(self.params, doseq=True)
# return str(self.params)
def set(self, key, value):
""" Sets the value associated with a given search parameter to the given value.
If there are several values, the others are deleted. """
self.params[key] = (value)
def getAll(self, key):
""" Returns all the values associated with a given search parameter. """
return self.params.get(key)
def __str__(self):
return urllib.parse.urlencode(self.params, doseq=True)
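# Illustrative usage of URLSearchParams (a sketch; the ordering of encoded pairs may vary):
#
#   params = URLSearchParams("q=URLUtils.searchParams&topic=api")
#   params.get("topic")            # -> 'api'
#   params.append("topic", "webdev")
#   params.toString()              # -> 'q=URLUtils.searchParams&topic=api&topic=webdev'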
# TODO - test
class Error(Exception):
''' Raise Errors '''
def __init__(self, message, *args, **kwargs):
self.message = message
super(Error, self).__init__(message)
# def __str__(self):
# return self.message
# Error
# AggregateError
# EvalError
# InternalError
# RangeError
# ReferenceError
# SyntaxError
# TypeError
# URIError
# ---- STUBBING OUT SOME NEW ONES TO WORK ON
class Reflect():
"""
The Reflect object provides the following static functions which have the same names as the proxy handler methods.
Some of these methods are also the same as corresponding methods on Object,
although they do have some subtle differences between them.
"""
@staticmethod
def ownKeys(target):
""" Returns an array of the target object's own (not inherited) property keys. """
return target.keys()
# return target.__dict__.keys()
@staticmethod
def apply(target, thisArgument, argumentsList):
""" Calls a target function with arguments as specified by the argumentsList parameter. See also Function.prototype.apply(). """
raise NotImplementedError
@staticmethod
def construct(target, argumentsList, newTarget):
""" The new operator as a function. Equivalent to calling new target(...argumentsList). Also provides the option to specify a different prototype. """
raise NotImplementedError
@staticmethod
def defineProperty(target, propertyKey, attributes):
""" Similar to Object.defineProperty(). Returns a Boolean that is true if the property was successfully defined. """
raise NotImplementedError
@staticmethod
def deleteProperty(target, propertyKey):
""" The delete operator as a function. Equivalent to calling delete target[propertyKey]. """
raise NotImplementedError
@staticmethod
def get(target, propertyKey, receiver):
""" Returns the value of the property. Works like getting a property from an object (target[propertyKey]) as a function. """
raise NotImplementedError
@staticmethod
def getOwnPropertyDescriptor(target, propertyKey):
""" Similar to Object.getOwnPropertyDescriptor(). Returns a property descriptor of the given property if it exists on the object, undefined otherwise. """
raise NotImplementedError
@staticmethod
def getPrototypeOf(target):
""" Same as Object.getPrototypeOf(). """
raise NotImplementedError
@staticmethod
def has(target, propertyKey):
""" Returns a Boolean indicating whether the target has the property. Either as own or inherited. Works like the in operator as a function. """
raise NotImplementedError
@staticmethod
def isExtensible(target):
""" Same as Object.isExtensible(). Returns a Boolean that is true if the target is extensible. """
raise NotImplementedError
@staticmethod
def preventExtensions(target):
""" Similar to Object.preventExtensions(). Returns a Boolean that is true if the update was successful. """
raise NotImplementedError
@staticmethod
def set(target, propertyKey, value, receiver):
""" A function that assigns values to properties. Returns a Boolean that is true if the update was successful. """
raise NotImplementedError
@staticmethod
def setPrototypeOf(target, prototype):
""" A function that sets the prototype of an object. Returns a Boolean that is true if the update was successful. """
raise NotImplementedError
class Symbol():
# a global registry for symbols
registry = []
# Creates a new Symbol object.
def __init__(self, symbol):
self.symbol = symbol
self.description = None
self.registry.append(self)
# self.__class__.registry = self.registry
def hasInstance(self, obj):
"""[A method determining if a constructor object recognizes an object as its instance. Used by instanceof.]
Args:
obj ([type]): [a constructor object]
Returns:
[type]: [True if obj is an instance of this symbol, False otherwise]
"""
return self.symbol == obj.symbol
def isConcatSpreadable(self):
""" A Boolean value indicating if an object should be flattened to its array elements. Used by Array.prototype.concat()."""
return False
def iterator(self, obj):
""" A method returning the default iterator for an object. Used by for...of. """
return iter(obj)
def asyncIterator(self, obj):
""" A method that returns the default AsyncIterator for an object. Used by for await...of. """
return iter(obj)
# A method that matches against a string, also used to determine if an object may be used as a regular expression.
def match(self, item):
""" A method that matches the symbol against a string, also used to determine if an object may be used as a regular expression. """
raise NotImplementedError
# A method that returns an iterator, that yields matches of the regular expression against a string.
# Used by String.prototype.matchAll().
# def matchAll(self, obj):
# if isinstance(obj, str):
# return obj == self.symbol
# return False
# A method that replaces matched substrings of a string. Used by String.prototype.replace().
# def replace(self,
# A method that returns the index within a string that matches the regular expression. Used by String.prototype.search().
def search(self):
raise NotImplementedError
# A method that splits a string at the indices that match a regular expression. Used by String.prototype.split().
def split(self):
raise NotImplementedError
# A constructor function that is used to create derived objects.
def species(self):
raise NotImplementedError
# A method converting an object to a primitive value.
def toPrimitive(self):
raise NotImplementedError
# A string value used for the default description of an object. Used by Object.prototype.toString().
def toStringTag(self):
raise NotImplementedError
# An object value of whose own and inherited property names are excluded from the with environment bindings of the associated object.
def unscopables(self):
raise NotImplementedError
# @staticmethod
# def for(key):
# """ Searches for existing Symbols with the given key and returns it if found.
# Otherwise a new Symbol gets created in the global Symbol registry with key. """
# raise NotImplementedError
# @staticmethod
# def keyFor(sym)
# """ Retrieves a shared Symbol key from the global Symbol registry for the given Symbol. """
# raise NotImplementedError
def toSource(self):
""" Returns a string containing the source of the Symbol. Overrides the Object.prototype.toSource() method. """
raise NotImplementedError
def toString(self):
""" Returns a string containing the description of the Symbol. Overrides the Object.prototype.toString() method. """
raise NotImplementedError
def valueOf(self):
""" Returns the Symbol. Overrides the Object.prototype.valueOf() method. """
raise NotImplementedError
'''
class Atomics():
"""
The Atomics object provides atomic operations as static methods
They are used with SharedArrayBuffer and ArrayBuffer objects.
When memory is shared, multiple threads can read and write the same data in memory.
Atomic operations make sure that predictable values are written and read,
that operations are finished before the next operation starts and that operations are not interrupted.
Wait and notify
The wait() and notify() methods are modeled on Linux futexes ("fast user-space mutex") and provide ways for waiting
until a certain condition becomes true and are typically used as blocking constructs.
"""
@staticmethod
def add(array, index, value):
""" Adds the provided value to the existing value at the specified index of the array.
Returns the old value at that index."""
return array.add(index, value)
def and_(array, index, value):
""" Computes a bitwise AND on the value at the specified index of the array with the provided value.
Returns the old value at that index."""
raise NotImplementedError
@staticmethod
def compareExchange(array, index, value):
""" Stores a value at the specified index of the array, if it equals a value. Returns the old value."""
raise NotImplementedError
@staticmethod
def exchange():
""" Stores a value at the specified index of the array. Returns the old value."""
raise NotImplementedError
@staticmethod
def isLockFree(size):
""" An optimization primitive that can be used to determine whether to use locks or atomic operations.
Returns true if an atomic operation on arrays of the given element size will be implemented using a hardware atomic operation (as opposed to a lock). Experts only."""
raise NotImplementedError
@staticmethod
def load():
""" Returns the value at the specified index of the array."""
raise NotImplementedError
# @staticmethod
# """ Notifies agents that are waiting on the specified index of the array. Returns the number of agents that were notified."""
# def notify(
@staticmethod
def or_():
""" Computes a bitwise OR on the value at the specified index of the array with the provided value. Returns the old value at that index."""
raise NotImplementedError
@staticmethod
def store():
""" Stores a value at the specified index of the array. Returns the value."""
raise NotImplementedError
@staticmethod
def sub():
""" Subtracts a value at the specified index of the array. Returns the old value at that index."""
raise NotImplementedError
@staticmethod
def wait():
""" Verifies that the specified index of the array still contains a value and sleeps awaiting or times out. Returns either "ok", "not-equal", or "timed-out". If waiting is not allowed in the calling agent then it throws an Error exception. (Most browsers will not allow wait() on the browser's main thread.)"""
raise NotImplementedError
@staticmethod
def xor():
""" Computes a bitwise XOR on the value at the specified index of the array with the provided value. Returns the old value at that index."""
raise NotImplementedError
'''
'''
# class ClipboardData():
# clipboardData Returns an object containing the data affected by the clipboard operation
# def __init__():
# pass
# class History():
# def __init__():
# pass
# def back():
# """ Loads the previous URL in the history list """
# raise NotImplementedError
# def forward():
# """ Loads the next URL in the history list """
# raise NotImplementedError
# def go():
# """ Loads a specific URL from the history list """
# raise NotImplementedError
# class Geolocation():
# def __init__():
# pass
def clearWatch():
""" Unregister location/error monitoring handlers previously installed using Geolocation.watchPosition() """
def coordinates Returns:
""" the position and altitude of the device on Earth """
def getCurrentPosition():
""" Returns the current position of the device """
def position Returns:
""" the position of the concerned device at a given time """
def positionError Returns:
""" the reason of an error occurring when using the geolocating device """
def positionOptions Describes:
""" an object containing option properties to pass as a parameter of Geolocation.getCurrentPosition() and Geolocation.watchPosition() """
def watchPosition():
""" Returns a watch ID value that then can be used to unregister the handler by passing it to the Geolocation.clearWatch() method """
# BELOW is legacy data from a dump of ALL dom/js methods. was looking for useful things to port back when this was the only class.
# -- leave here for now - ill delete stuff later. it reminds me what i haven't covered
# clear() Clears the console Console, Storage
# debugger Stops the execution of JavaScript, and calls (if available) the debugging function Statements
# elapsedTime Returns the number of seconds a transition has been running
# error() Outputs an error message to the console Console
# getItem() Returns the value of the specified key name Storage
# getNamedItem() Returns a specified attribute node from a NamedNodeMap Attribute
# item() Returns the attribute node at a specified index in a NamedNodeMap Attribute, HTMLCollection
# namedItem() Returns the element with the specified ID, or name, in an HTMLCollection HTMLCollection
# removeNamedItem() Removes a specified attribute node Attribute
# setNamedItem() Sets the specified attribute node (by name) Attribute
# specified Returns true if the attribute has been specified, otherwise it returns false Attribute
'''
```
#### File: domonic/lerpy/easing.py
```python
from domonic.javascript import Math
"""
t : current time
b : start value
c : change
d : total time
http://robertpenner.com/easing/
"""
class Back():
@staticmethod
def easeIn(t, b, c, d, aa, bb, s=1.70158):
t /= d
return c * t * t * ((s + 1) * t - s) + b
@staticmethod
def easeOut(t, b, c, d, aa, bb, s=1.70158):
t = t / d - 1
return c * (t * t * ((s + 1) * t + s) + 1) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb, s=1.70158):
t /= d / 2
s *= 1.525
if t < 1:
return c / 2 * (t * t * ((s + 1) * t - s)) + b
t -= 2
return c/2 * (t * t * ((s + 1) * t + s) + 2) + b
class Bounce():
@staticmethod
def easeIn(t, b, c, d, aa=0, bb=0):
return c - Bounce.easeOut(d-t, 0, c, d) + b
@staticmethod
def easeOut(t, b, c, d, aa=0, bb=0):
t /= d
if t < (1/2.75):
return c*(7.5625*t*t) + b
elif t < (2/2.75):
t -= (1.5/2.75)
return c*(7.5625*t*t + 0.75) + b
elif t < (2.5/2.75):
t -= (2.25/2.75)
return c*(7.5625*t*t + 0.9375) + b
else:
t -= (2.625/2.75)
return c*(7.5625*t*t + 0.984375) + b
@staticmethod
def easeInOut(t, b, c, d, aa=0, bb=0):
if t < d/2:
return Bounce.easeIn(t*2, 0, c, d) * .5 + b
return Bounce.easeOut(t*2-d, 0, c, d) * .5 + c*.5 + b
class Circ():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
t /= d
return -c * (Math.sqrt(1 - t*t) - 1) + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
t /= d
t -= 1
return c * Math.sqrt(1 - t*t) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
t /= d/2
if t < 1:
return -c/2 * (Math.sqrt(1 - t*t) - 1) + b
t -= 2
return c/2 * (Math.sqrt(1 - t*t) + 1) + b
class Cubic():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
t /= d
return c*t*t*t + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
t /= d
t -= 1
return c * (t*t*t + 1) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
t /= d/2
if t < 1:
return c/2*t*t*t + b
t -= 2
return c/2*(t*t*t + 2) + b
class Elastic():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
s = 1.70158
p = 0
a = c
if t == 0:
return b
t /= d
if t == 1:
return b + c
if not p:
p = d * .3
if a < abs(c):
a = c
s = p/4
else:
s = p/(2*Math.PI) * Math.asin(c/a)
t -= 1
return -(a*Math.pow(2, 10 * t) * Math.sin((t*d-s)*(2*Math.PI)/p)) + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
s = 1.70158
p = 0
a = c
if t == 0:
return b
t /= d
if t == 1:
return b + c
if not p:
p = d * .3
if a < abs(c):
a = c
s = p/4
else:
s = p/(2*Math.PI) * Math.asin(c/a)
return a * Math.pow(2, -10*t) * Math.sin((t*d-s)*(2*Math.PI) / p) + c + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
s = 1.70158
p = 0
a = c
if t == 0:
return b
t /= d/2
if t == 2:
return b + c
if not p:
p = d * (.3 * 1.5)
if a < abs(c):
a = c
s = p/4
else:
s = p/(2*Math.PI) * Math.asin(c/a)
if t < 1:
t -= 1
return -a/2 *Math.pow(2, 10*t) * Math.sin((t*d - s)*(2*Math.PI) / p) + b
t -= 1
return a * Math.pow(2, -10*t) * Math.sin((t*d - s)*(2*Math.PI) / p) * .5 + c + b
class Expo():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
return c * Math.pow(2, 10 * (t/d - 1)) + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
return c * (-Math.pow(2, -10 * t/d) + 1) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
t /= d/2
if t < 1:
return c/2 * Math.pow(2, 10 * (t - 1)) + b
t -= 1
return c/2 * (-Math.pow(2, -10 * t) + 2) + b
class Linear():
# lambda t, b, c, d : c*t/d + b
@staticmethod
def easeNone(t, b, c, d, aa, bb):
return c*t/d + b
@staticmethod
def easeIn(t, b, c, d, aa, bb):
return c*t/d + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
return c*t/d + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
return c*t/d + b
class Quad():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
t /= d
return c*t*t + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
t /= d
return -c * t*(t-2) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
t /= d/2
if t < 1:
return c/2*t*t + b
t-=1
return -c/2 * (t*(t-2) - 1) + b
class Quart():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
t /= d
return c*t*t*t*t + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
t /= d
t -= 1
return -c * (t*t*t*t - 1) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
t /= d/2
if t < 1:
return c/2*t*t*t*t + b
t -= 2
return -c/2 * (t*t*t*t - 2) + b
class Quint():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
t /= d
return c*t*t*t*t*t + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
t /= d
t -= 1
return c*(t*t*t*t*t + 1) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
t /= d/2
if t < 1:
return c/2*t*t*t*t*t + b
t -= 2
return c/2*(t*t*t*t*t + 2) + b
class Sine():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
return -c * Math.cos(t/d * (Math.PI/2)) + c + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
return c * Math.sin(t/d * (Math.PI/2)) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
return -c/2 * (Math.cos(Math.PI*t/d) - 1) + b
# class Angle():
# @staticmethod
# def easeIn(t, b, c, d, aa, bb):
# return -c * (t/d - 1) + b
# @staticmethod
# def easeOut(t, b, c, d, aa, bb):
# return c * (t/d) + b
# @staticmethod
# def easeInOut(t, b, c, d, aa, bb):
# if t==0:
# return b
# elif t==d:
# return b+c
# else:
# return -c/2 * (t/d - 2) + b
# class Zoom():
# @staticmethod
# def easeIn(t, b, c, d, aa, bb):
# return c * t/d + b
# @staticmethod
# def easeOut(t, b, c, d, aa, bb):
# return c * (t - d/2)/d + b
# @staticmethod
# def easeInOut(t, b, c, d, aa, bb):
# if t<d/2:
# return c/2 * t/d + b
# t-=d/2
# return c/2 * (t/d - 1) + b
'''
class Exponential():
@staticmethod
def easeIn(t, b, c, d, aa, bb):
if t == 0:
return b
else:
return c * Math.pow(2, 10 * (t/d - 1)) + b
@staticmethod
def easeOut(t, b, c, d, aa, bb):
if t == d:
return b + c
else:
return c * (-Math.pow(2, -10 * t/d) + 1) + b
@staticmethod
def easeInOut(t, b, c, d, aa, bb):
if t==0:
return b
elif t==d:
return b+c
t = t / (d * 0.5)
if t < 1:
return c/2 * Math.pow(2, 10 * (t - 1)) + b
t -= 1
return c/2 * (-Math.pow(2, -10 * t) + 2) + b
'''
# class PressIn():
# @staticmethod
# def easeIn(t, b, c, d, aa, bb):
# return c - b*t/d
# @staticmethod
# def easeOut(t, b, c, d, aa, bb):
# return b*t/d + c
# @staticmethod
# def easeInOut(t, b, c, d, aa, bb):
# if t==0:
# return b
# t *= 2
# if t < 1:
# return c - b*t/d
# return b*t/d + c
```
#### File: domonic/domonic/__main__.py
```python
import argparse
import os
prog = '''
function project(){
PROJECT_NAME=$1
mkdir $PROJECT_NAME
cd $PROJECT_NAME
mkdir static
mkdir static/js
mkdir static/css
mkdir static/img
mkdir static/data
mkdir archive
touch app.py
touch README.md
touch MakeFile
mkdir app
touch app/__init__.py
git init
touch .gitignore
touch static/js/master.js
touch static/css/styles.css
touch static/data/data.json
python3 -m venv venv
. venv/bin/activate
pip3 install requests
pip3 install sanic
pip3 install domonic
pip3 freeze >> requirements.txt
chmod -R 777 static
open .
}
'''
# def install():
# def clone_webpage(url):
# clone a webpage and all the resources for that page with wget
# import os
# os.system('wget -r -l1 -A.js,.css,.jpg,.jpeg,.png,.gif,.svg ' + url)
# class domonic_ui(object):
# """
# domonic UI - browser interface to create pyml via contextmenu clicks?.
# """
# def __init__(self):
# pass
def parse_args():
parser = argparse.ArgumentParser(add_help=False, prog="domonic", usage="%(prog)s [options]", description="Generate HTML with Python 3")
parser.add_argument('-a', '--assets', help="generate as assets directory with common files", action='store_true')
parser.add_argument('-d', '--download', help="Attempts to generate a domonic template from a webpage", type=str)
parser.add_argument('-h', '--help', action='store_true') # launch the docs
parser.add_argument('-v', '--version', action='store_true')
# parser.add_argument('-u', '--ui', help="launches a UI")
parser.add_argument('-i', '--install', action='store_true') # add 'projects' to the .bashprofile or .bashrc
# parser.add_argument('-w', '--website', action='store_true') # launch the docs
# parser.add_argument('-s', '--server', help="runs python -m http.server", type=str)
# parser.add_argument('-u', '--url', help="url to launch the server", type=str)
# -- ideas
# -- change all file extensions. from, to
# -- generate assets/app/license/readme/sitemap.
args = parser.parse_args()
return args
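# Illustrative invocations (a sketch of the flags parsed above):
#
#   python3 -m domonic -v                        # print the installed version
#   python3 -m domonic -h                        # open the documentation in a browser
#   python3 -m domonic -d http://example.com     # save a domonic template of the page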
def do_things(arguments):
from domonic.terminal import TerminalException
try:
if arguments.assets is True:
from domonic.utils import Utils
Utils.init_assets()
# --license,readme,sitemap,requirements
except TerminalException as e:
print(e)
# print(arguments.download)
if arguments.download is not None:
print('creating domonic template from url:')
from domonic import domonic
page = domonic.get(arguments.download)
from domonic.html import render
from domonic.utils import Utils
print("filename:", Utils.url2file(arguments.download))
render(page, Utils.url2file(arguments.download))
if arguments.help is True:
import webbrowser
webbrowser.open_new("https://domonic.readthedocs.io/")
if arguments.version is True:
from domonic import __version__
print(__version__)
return __version__
# if arguments.server is True:
# port = domonic.get(arguments.server)
# os.system('python -m http.server ' + port)
if arguments.install is True:
# detect operating system and attempts to append prog to the .bashprofile or .bashrc
if os.name == 'nt':
print('Sorry, this install is currently unavailable for Windows')
else:
# detect if the user has a bashrc or bashprofile
if os.path.exists(os.path.expanduser('~/.bashrc')):
# don't do it if it already exists
if 'function project()' not in open(os.path.expanduser('~/.bashrc')).read():
print('found .bashrc')
with open(os.path.expanduser('~/.bashrc'), 'a') as f:
f.write('\n\n# domonic\n')
f.write(prog)
f.write('alias domonic="python3 -m domonic"\n')
else:
print('already installed. You need to manually remove it from ~/.bashrc')
elif os.path.exists(os.path.expanduser('~/.bash_profile')):
if 'function project()' not in open(os.path.expanduser('~/.bash_profile')).read():
print('found .bash_profile')
with open(os.path.expanduser('~/.bash_profile'), 'a') as f:
f.write('\n\n# domonic\n')
f.write(prog)
f.write('alias domonic="python3 -m domonic"\n')
else:
print('already installed. You need to manually remove it from ~/.bash_profile')
else:
print('no bashrc or bash_profile found. you need to manually add the following to your .bashrc or .bash_profile')
print(prog)
if __name__ == "__main__":
args = parse_args()
do_things(args)
```
#### File: domonic/domonic/mathml.py
```python
from domonic.html import tag
from domonic.dom import Node
mathml_tags = ["math", "maction", "math", "menclose", "merror", "mfenced", "mfrac", "mi", "mmultiscripts", "mn", "mo", "mover", "mpadded", "mphantom", "mroot", "mrow",
"ms", "mspace", "msqrt", "mstyle", "msub", "msubsup", "msup", "mtable", "mtd", "mtext", "mtr", "munder", "munderover", "semantics", "maligngroup",
"malignmark", "msline", "msgroup", "mlongdiv", "mstyle", "mprescripts", "mscarries", "mscarry", "munder", "munderover", "none"]
# mathml_attributes = []
def mathml_init(self, *args, **kwargs):
tag.__init__(self, *args, **kwargs)
Node.__init__(self, *args, **kwargs)
math_ = type('math', (tag, Node), {'name': 'math', '__init__': mathml_init})
maction = type('maction', (tag, Node), {'name': 'maction', '__init__': mathml_init})
menclose = type('menclose', (tag, Node), {'name': 'menclose', '__init__': mathml_init})
merror = type('merror', (tag, Node), {'name': 'merror', '__init__': mathml_init})
mfenced = type('mfenced', (tag, Node), {'name': 'mfenced', '__init__': mathml_init})
mfrac = type('mfrac', (tag, Node), {'name': 'mfrac', '__init__': mathml_init})
mi = type('mi', (tag, Node), {'name': 'mi', '__init__': mathml_init})
mmultiscripts = type('mmultiscripts', (tag, Node), {'name': 'mmultiscripts', '__init__': mathml_init})
mn = type('mn', (tag, Node), {'name': 'mn', '__init__': mathml_init})
mo = type('mo', (tag, Node), {'name': 'mo', '__init__': mathml_init})
mover = type('mover', (tag, Node), {'name': 'mover', '__init__': mathml_init})
mpadded = type('mpadded', (tag, Node), {'name': 'mpadded', '__init__': mathml_init})
mphantom = type('mphantom', (tag, Node), {'name': 'mphantom', '__init__': mathml_init})
mroot = type('mroot', (tag, Node), {'name': 'mroot', '__init__': mathml_init})
mrow = type('mrow', (tag, Node), {'name': 'mrow', '__init__': mathml_init})
ms = type('ms', (tag, Node), {'name': 'ms', '__init__': mathml_init})
mspace = type('mspace', (tag, Node), {'name': 'mspace', '__init__': mathml_init})
msqrt = type('msqrt', (tag, Node), {'name': 'msqrt', '__init__': mathml_init})
mstyle = type('mstyle', (tag, Node), {'name': 'mstyle', '__init__': mathml_init})
msub = type('msub', (tag, Node), {'name': 'msub', '__init__': mathml_init})
msubsup = type('msubsup', (tag, Node), {'name': 'msubsup', '__init__': mathml_init})
msup = type('msup', (tag, Node), {'name': 'msup', '__init__': mathml_init})
mtable = type('mtable', (tag, Node), {'name': 'mtable', '__init__': mathml_init})
mtd = type('mtd', (tag, Node), {'name': 'mtd', '__init__': mathml_init})
mtext = type('mtext', (tag, Node), {'name': 'mtext', '__init__': mathml_init})
mtr = type('mtr', (tag, Node), {'name': 'mtr', '__init__': mathml_init})
munder = type('munder', (tag, Node), {'name': 'munder', '__init__': mathml_init})
munderover = type('munderover', (tag, Node), {'name': 'munderover', '__init__': mathml_init})
semantics = type('semantics', (tag, Node), {'name': 'semantics', '__init__': mathml_init})
maligngroup = type('maligngroup', (tag, Node), {'name': 'maligngroup', '__init__': mathml_init})
malignmark = type('malignmark', (tag, Node), {'name': 'malignmark', '__init__': mathml_init})
msline = type('msline', (tag, Node), {'name': 'msline', '__init__': mathml_init})
msgroup = type('msgroup', (tag, Node), {'name': 'msgroup', '__init__': mathml_init})
mlongdiv = type('mlongdiv', (tag, Node), {'name': 'mlongdiv', '__init__': mathml_init})
mstyle = type('mstyle', (tag, Node), {'name': 'mstyle', '__init__': mathml_init})
mprescripts = type('mprescripts', (tag, Node), {'name': 'mprescripts', '__init__': mathml_init})
mscarries = type('mscarries', (tag, Node), {'name': 'mscarries', '__init__': mathml_init})
mscarry = type('mscarry', (tag, Node), {'name': 'mscarry', '__init__': mathml_init})
munder = type('munder', (tag, Node), {'name': 'munder', '__init__': mathml_init})
munderover = type('munderover', (tag, Node), {'name': 'munderover', '__init__': mathml_init})
none = type('none', (tag, Node), {'name': 'none', '__init__': mathml_init})
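# Illustrative usage (a sketch; domonic tags render when cast to a string, and the
# exact output shown is approximate):
#
#   expr = math_(mrow(mi('x'), mo('+'), mn('1')))
#   str(expr)   # -> '<math><mrow><mi>x</mi><mo>+</mo><mn>1</mn></mrow></math>'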
```
#### File: examples/sockets/atoms.py
```python
import asyncio
import websockets # you gotta 'pip3 install websockets' for this example.
import json
import sys
sys.path.insert(0, '../..')
from domonic.javascript import *
from domonic.html import *
from domonic.particles import *
# run this first. python3 atoms.py
# then open and look at atoms.html while the socket is running
# create and animate some particles
WIDTH = 1000
HEIGHT = 600
ATOM_COUNT = 100
atoms=[]
def init():
atoms=[]
for each in range(ATOM_COUNT):
creatAtom()
def creatAtom():
p = Particle(Math.random()*2)
p.grav = Math.random()*10
p.maxSpeed = 1000
p.damp = 0.4
p.wander = 5
p.x = Math.random()*WIDTH
p.y = Math.random()*HEIGHT
# p.vx = Math.random()*1000
p.vy = Math.random()*10
p.set_bounds({'xMin':0, 'yMin':0, 'xMax':WIDTH, 'yMax':HEIGHT})
atoms.append( p )
def update_atoms():
for atom in atoms:
atom.update()
# run the update loop from here.
init()
loop = window.setInterval(update_atoms, 10)  # update on its own clock; clients see the state when they request it via requestAnimationFrame
# create webpage with a socket connection back to our server so it can get the atom data
page = html(
# make a canvas
style('''
canvas {
background: #131c35 linear-gradient(black,#192853, black);
display:block; position:absolute;
top:0; left:0; right:0; bottom:0;
}
''',
_type="text/css"
),
body(canvas(_id="canvas", _width="1000", _height="600")),
# listen on the socket and call draw when we get a message
script('''
const socket = new WebSocket('ws://0.0.0.0:5555');
socket.onmessage = function(event) { atoms = JSON.parse(event.data); draw(); };
'''),
# draw the atoms
script('''
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var WIDTH=canvas.width;
var HEIGHT=canvas.height;
function resizeCanvas(){
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
WIDTH=canvas.width;
HEIGHT=canvas.height;
}
function animate() {
socket.send('!'); // send any old message to trigger socket response. so i can control the framerate
// draw();
}
function draw() {
context.clearRect(0, 0, WIDTH, HEIGHT);
// context.globalCompositeOperation = "source-over";
var i, point;
for(i = 0; i < atoms.length; i++ ) {
point = atoms[i];
context.save();
context.translate(point.x,point.y);
context.rotate( point.rotation );
context.restore();
// window.console.log(point);
drawAtom(point,i);
}
// context.shadowBlur = 10;
// context.shadowColor = 'white'
// context.globalAlpha = 0.1;
// context.filter = 'blur(2px)';
// window.requestAnimationFrame(animate);
}
function drawAtom(p,i){
context.beginPath();
context.fillStyle = 'white';
context.arc(p.x, p.y, 1, 0, 2 * Math.PI, false);
context.lineWidth = 2;
context.strokeStyle = '#000';
context.stroke();
context.fill();
}
var intID;
function setFramerate(val){
clearInterval(this.intID)
this.intID = setInterval( function(){ animate(); }, 1000/val );
// window.requestAnimationFrame(animate);
}
setFramerate(60);
resizeCanvas();
''')
)
# render a page of particles you can open an look at while the socket server is running
render( page, 'atoms.html' )
# run the socket server
async def update(websocket, path):
while True:
# msg = await websocket.recv()
await websocket.send(json.dumps(atoms, default=vars))
server = websockets.serve(update, '0.0.0.0', 5555)
asyncio.get_event_loop().run_until_complete(server)
asyncio.get_event_loop().run_forever()
'''
# see if a gevent server is better
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
class Echo(WebSocketApplication):
# def on_open(self):
# print "Connection opened"
def on_message(self, message):
self.ws.send(json.dumps(atoms, default=vars))
# self.ws.send(message)
# def on_close(self, reason):
# print reason
WebSocketServer(('0.0.0.0', 5555), Resource({'/': Echo})).serve_forever()
'''
'''
# see if flask-threaded server allows better concurrency. for other windows and devices
from flask import Flask
from flask_threaded_sockets import Sockets, ThreadedWebsocketServer
app = Flask(__name__)
sockets = Sockets(app)
@sockets.route('/')
def echo_socket(ws):
while not ws.closed:
message = ws.receive()
ws.send(json.dumps(atoms, default=vars))
if __name__ == "__main__":
srv = ThreadedWebsocketServer("0.0.0.0", 5555, app)
srv.serve_forever()
'''
``` |
{
"source": "Jordan-Cottle/Game-Design-Capstone",
"score": 3
} |
#### File: starcorp/database/session.py
```python
from sqlalchemy import engine_from_config
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session
from data import CONFIG
from utils import get_logger
LOGGER = get_logger(__name__)
class Database:
""" Class for managing a database engine. """
def __init__(self, url=None) -> None:
self.url = url
self._engine = None
def create_engine(self):
""" Create a new engine. """
config = CONFIG.get("database.engine")
# Override url if set by constructor
if self.url is not None:
config["url"] = self.url
return engine_from_config(config, prefix="")
@property
def engine(self):
""" Lazy load an engine object. """
if self._engine is None:
self._engine = self.create_engine()
return self._engine
ENGINE = Database().engine
class DatabaseSession:
""" Class for managing a session lifecycle. """
def __init__(self, bind=ENGINE) -> None:
self.bind = bind
self._session = None
@property
def session(self):
""" Lazy load session when it is needed. """
if self._session is None:
self._session = Session(bind=self.bind)
return self._session
def __getattr__(self, name):
return getattr(self.session, name)
def __enter__(self):
return self
def __exit__(self, *args): # pylint: disable=unused-argument
if self._session is None:
return
try:
self.session.commit()
except SQLAlchemyError:
LOGGER.exception("Database operation failed to commit, rolling back")
self.session.rollback()
else:
self.session.close()
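# Illustrative usage (a sketch; `SomeModel` is a placeholder for any mapped class):
#
#   with DatabaseSession() as session:
#       session.add(SomeModel(...))
#   # __exit__ commits, rolls back on SQLAlchemyError, and closes the session on success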
```
#### File: starcorp/objects/city.py
```python
from objects import FOOD, FUEL, WATER, GameObject, Resource
from world.coordinates import Coordinate
from global_context import CITIES
from utils import get_logger
LOGGER = get_logger(__name__)
class City(GameObject):
""" Represent a city in the game. """
def __init__(self):
super().__init__()
self.name = "City"
self.population = 0
self.resources = {
FOOD: 5,
WATER: 15,
FUEL: 2,
}
def valid_position(self, position):
""" Cities don't move. """
return False
@property
def json(self):
""" Get json serializable form of a city. """
data = super().json
data["name"] = self.name
data["population"] = self.population
data["resources"] = {
resource.name: value for resource, value in self.resources.items()
}
return data
@classmethod
def load(cls, data):
""" Reinstantiate a city from a data dictionary. """
city = super().load(data)
city.name = data["name"]
city.population = data["population"]
city.resources = {
Resource.retrieve(name): value for name, value in data["resources"].items()
}
return city
@property
def growth(self):
""" Calculate growth for the city. """
total_outstanding_demand = sum(
(self.demand(resource) - self.volume(resource)) * resource.growth_weight
for resource in self.resources
)
# TODO: figure out better equation for growth/demand
growth = self.population - (total_outstanding_demand * 1.5)
if growth > 0:
# Cap to proportion of population
growth = min(self.population * 0.5, growth)
return int(growth)
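# Illustrative arithmetic for the formula above (assumed numbers): with a
# population of 10 and a weighted outstanding demand of 4, growth is
# 10 - (4 * 1.5) = 4, which is under the 0.5 * population cap, so the city grows by 4.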
def volume(self, resource):
""" Return amount of a given resource available in the city. """
return self.resources[resource]
def demand(self, resource):
""" Calculate demand for a resource in the city. """
demand = self.population * resource.demand_ratio
return int(demand)
def tick(self):
""" Process a game tick on the city. """
for resource in self.resources:
self.resources[resource] -= self.demand(resource)
if self.volume(resource) < 0:
self.resources[resource] = 0
self.population += self.growth
def sell(self, resource, volume):
""" Add resources to the city and return the total price of those resources. """
value = resource.cost(self.demand(resource)) * volume
LOGGER.info(f"{volume} units of {resource} sold to {self} for ${value}")
self.resources[resource] += volume
LOGGER.debug(f"{self.resources[resource]} units in stock at {self}")
return value
def __str__(self):
return self.name
# TODO: move initialization of cities to a proper loading function
c = City()
c.name = "Demoville"
c.population = 10
c.position = Coordinate(-4, 2, 2)
CITIES[c.position] = c
c = City()
c.name = "Otherville"
c.population = 42
c.position = Coordinate(-2, 2, 0)
CITIES[c.position] = c
```
#### File: starcorp/objects/gameobject.py
```python
from abc import abstractmethod
from uuid import uuid4
from data.json_util import Serializable
from world.coordinates import Coordinate
class GameObject(Serializable):
""" And object that has a position in the game world. """
objects = {}
def __init__(self):
self.uuid = str(uuid4())
self.position = Coordinate(0, 0, 0)
GameObject.objects[self.uuid] = self
@abstractmethod
def valid_position(self, position):
""" Determine if a position is a valid for this object. """
@property
def json(self):
data = super().json
data["uuid"] = self.uuid
data["position"] = self.position.json
return data
@classmethod
def load(cls, data):
obj = cls()
obj.uuid = data["uuid"]
obj.position = Coordinate.load(data["position"])
return obj
def move_to(self, position):
""" Move a game object to a new position. """
if not self.valid_position(position):
raise ValueError(f"{position} is invalid for {self}")
self.position = position
@staticmethod
def get(uuid):
""" Get the game object from storage based on its uuid. """
return GameObject.objects.get(uuid)
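# Illustrative usage (a sketch): concrete subclasses such as City implement
# valid_position and are registered by uuid when constructed.
#
#   obj = City()
#   GameObject.get(obj.uuid) is obj   # -> True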
```
#### File: starcorp/utils/__init__.py
```python
from .logging import LOGGER, get_logger
def get_subclasses(cls):
""" Get all subclasses of a class. """
for sub_cls in cls.__subclasses__():
yield from get_subclasses(sub_cls)
yield sub_cls
```
#### File: starcorp/world/layer.py
```python
import json
from data import Serializable, from_json
from world import Coordinate
class Layer(Serializable):
""" Contains a map of locations related to data points. """
def __init__(self):
self.data = {}
def __getitem__(self, key):
if not isinstance(key, Coordinate):
raise TypeError("Layers can only be indexed using cube coordinates")
return self.data[key]
def __setitem__(self, key, value):
if not isinstance(key, Coordinate):
raise TypeError("Layers can only be indexed using cube coordinates")
self.data[key] = value
@property
def json(self):
""" Return a json serializable form of the layer. """
data = super().json
data.update({coordinate.json: value for coordinate, value in self.data.items()})
return data
@classmethod
def load(cls, data):
""" Reinstantiate a layer from a data dictionary. """
super().load(data)
layer = cls()
for coordinate, value in data.items():
coordinate = Coordinate.load(coordinate)
if isinstance(value, dict):
value = json.loads(value, object_hook=from_json)
layer[coordinate] = value
return layer
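# Illustrative usage (a sketch):
#
#   layer = Layer()
#   layer[Coordinate(0, 0, 0)] = "spawn"
#   layer[Coordinate(0, 0, 0)]    # -> 'spawn'
#   layer[(0, 0, 0)]              # raises TypeError: keys must be Coordinate instances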
```
#### File: StarcorpServer/tests/test_layer.py
```python
import pytest
from world.layer import Layer
from data import TileType
@pytest.fixture(name="tilemap")
def _tilemap(origin, a, b):
layer = Layer()
layer[origin] = TileType.GROUND
layer[a] = TileType.SPACE
layer[b] = TileType.WATER
return layer
def test_serializable(tilemap, origin, a, b):
data = tilemap.json
print(data)
assert data["__TYPE__"] == "Layer"
assert data["0,0,0"] == 1
assert data["1,2,-3"] == 0
assert data["-4,1,3"] == 2
reserialized = Layer.load(data)
assert tilemap[origin] == reserialized[origin]
assert tilemap[a] == reserialized[a]
assert tilemap[b] == reserialized[b]
assert tilemap[a] == TileType.SPACE
assert tilemap[b] == TileType.WATER
``` |
{
"source": "Jordan-Cottle/HeroQuest",
"score": 3
} |
#### File: HeroQuest/tests/test_game_time.py
```python
from datetime import datetime, timedelta
import pytest
from hero_quest.game_time import (
GLOBAL_CLOCK,
GameClock,
TimeSpent,
Timer,
TimerComplete,
)
def test_clock_moving_forward_when_time_spent():
"""Ensure GameClock objects move forward when time is spent."""
clock = GameClock(start=datetime(year=2000, month=1, day=1))
global_start_time = GLOBAL_CLOCK.current_time
event = TimeSpent(days=2, hours=3, minutes=4, seconds=5)
event.fire()
assert clock.current_time == datetime(
year=2000, month=1, day=3, hour=3, minute=4, second=5
)
assert GLOBAL_CLOCK.current_time == global_start_time + event.timdelta
def test_paused_clock_moving_forward_when_time_spent():
"""Ensure paused GameClock objects do not move forward when time is spent."""
clock = GameClock(start=datetime(year=2000, month=1, day=1))
start_time = clock.current_time
global_start_time = GLOBAL_CLOCK.current_time
clock.pause()
event = TimeSpent(days=2, hours=3, minutes=4, seconds=5)
event.fire()
assert clock.current_time == start_time
assert GLOBAL_CLOCK.current_time == global_start_time + event.timdelta
def test_resumed_clock_moving_forward_when_time_spent():
"""Ensure resumed GameClock objects move forward when time is spent."""
clock = GameClock(start=datetime(year=2000, month=1, day=1))
start_time = clock.current_time
global_start_time = GLOBAL_CLOCK.current_time
clock.pause()
event = TimeSpent(days=2, hours=3, minutes=4, seconds=5)
event.fire()
assert clock.current_time == start_time
assert GLOBAL_CLOCK.current_time == global_start_time + event.timdelta
clock.resume()
event.fire()
assert clock.current_time == start_time + event.timdelta
assert GLOBAL_CLOCK.current_time == global_start_time + (2 * event.timdelta)
@pytest.mark.parametrize(
"start,delta,end",
[
(
datetime(year=2000, month=5, day=5),
timedelta(days=1),
datetime(year=2000, month=5, day=4),
),
(
datetime(year=2000, month=5, day=5),
timedelta(days=5),
datetime(year=2000, month=4, day=30),
),
(
datetime(year=2000, month=1, day=1),
timedelta(days=1),
datetime(year=1999, month=12, day=31),
),
],
)
def test_clock_moving_backwards(start: datetime, delta: timedelta, end: datetime):
"""Ensure GameClock objects can move backwards."""
clock = GameClock(start=start)
clock.backward(delta)
assert clock.current_time == end
del clock
def test_timer_countdown():
"""Test that timers count down properly when time is spent."""
timer = Timer(seconds=30)
assert not timer.complete
timer_completed = False
@TimerComplete.handler
def detect_timer_end(event: TimerComplete):
nonlocal timer_completed
assert event.timer is timer
assert event.timer.complete
timer_completed = True
TimeSpent(seconds=30).fire()
assert timer.complete
assert (
timer_completed
), "Timers should fire a timer completed event when they complete."
# Clean up timer complete handler
TimerComplete.remove_handler(detect_timer_end)
def test_timer_countdown_complete():
"""Test that timers complain if moved forward after being complete."""
timer_duration = 30
time_offset = timedelta(seconds=timer_duration)
timer = Timer(seconds=timer_duration)
assert not timer.complete
timer.forward(time_offset)
assert timer.complete
# Undo automatic deactivation, simulates a flow/bug/misuse of timer.
timer.active = True
with pytest.raises(ValueError):
timer.forward(time_offset)
def test_timer_moving_backwards():
"""Test that timers complain if moved forward after being complete."""
timer_duration = 30
time_offset = timedelta(seconds=5)
timer = Timer(seconds=timer_duration)
assert not timer.complete
assert timer.time_remaining == timedelta(seconds=timer_duration)
timer.backward(time_offset)
assert not timer.complete
assert timer.time_remaining == timedelta(seconds=timer_duration) + time_offset
``` |
{
"source": "jordan-day/sportsreference",
"score": 3
} |
#### File: sportsreference/nba/teams.py
```python
import pandas as pd
from .constants import PARSING_SCHEME
from ..decorators import float_property_decorator, int_property_decorator
from .nba_utils import _retrieve_all_teams
from .. import utils
from .roster import Roster
from .schedule import Schedule
class Team:
"""
An object containing all of a team's season information.
Finds and parses all team stat information and identifiers, such as rank,
name, and abbreviation, and sets them as properties which can be directly
read from for easy reference.
If calling directly, the team's abbreviation needs to be passed. Otherwise,
the Teams class will handle all arguments.
Parameters
----------
team_name : string (optional)
The name of the team to pull if being called directly.
team_data : string (optional)
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string. Is only used when called directly
from the Teams class.
rank : int (optional)
A team's position in the league based on the number of points they
obtained during the season. Is only used when called directly from the
Teams class.
year : string (optional)
The requested year to pull stats from.
"""
def __init__(self, team_name=None, team_data=None, rank=None, year=None):
self._year = year
self._rank = rank
self._abbreviation = None
self._name = None
self._games_played = None
self._minutes_played = None
self._field_goals = None
self._field_goal_attempts = None
self._field_goal_percentage = None
self._three_point_field_goals = None
self._three_point_field_goal_attempts = None
self._three_point_field_goal_percentage = None
self._two_point_field_goals = None
self._two_point_field_goal_attempts = None
self._two_point_field_goal_percentage = None
self._free_throws = None
self._free_throw_attempts = None
self._free_throw_percentage = None
self._offensive_rebounds = None
self._defensive_rebounds = None
self._total_rebounds = None
self._assists = None
self._steals = None
self._blocks = None
self._turnovers = None
self._personal_fouls = None
self._points = None
self._opp_field_goals = None
self._opp_field_goal_attempts = None
self._opp_field_goal_percentage = None
self._opp_three_point_field_goals = None
self._opp_three_point_field_goal_attempts = None
self._opp_three_point_field_goal_percentage = None
self._opp_two_point_field_goals = None
self._opp_two_point_field_goal_attempts = None
self._opp_two_point_field_goal_percentage = None
self._opp_free_throws = None
self._opp_free_throw_attempts = None
self._opp_free_throw_percentage = None
self._opp_offensive_rebounds = None
self._opp_defensive_rebounds = None
self._opp_total_rebounds = None
self._opp_assists = None
self._opp_steals = None
self._opp_blocks = None
self._opp_turnovers = None
self._opp_personal_fouls = None
self._opp_points = None
if team_name:
team_data = self._retrieve_team_data(year, team_name)
self._parse_team_data(team_data)
def __str__(self):
"""
Return the string representation of the class.
"""
return f'{self.name} ({self.abbreviation}) - {self._year}'
def __repr__(self):
"""
Return the string representation of the class.
"""
return self.__str__()
def _retrieve_team_data(self, year, team_name):
"""
Pull all stats for a specific team.
By first retrieving a dictionary containing all information for all
teams in the league, only select the desired team for a specific year
and return only their relevant results.
Parameters
----------
year : string
A ``string`` of the requested year to pull stats from.
team_name : string
A ``string`` of the team's 3-letter abbreviation, such as 'HOU' for
the Houston Rockets.
Returns
-------
PyQuery object
Returns a PyQuery object containing all stats and information for
the specified team.
"""
team_data_dict, year = _retrieve_all_teams(year)
self._year = year
team_data = team_data_dict[team_name]['data']
self._rank = team_data_dict[team_name]['rank']
return team_data
def _parse_team_data(self, team_data):
"""
Parses a value for every attribute.
This function looks through every attribute with the exception of
'_rank' and retrieves the value according to the parsing scheme and
index of the attribute from the passed HTML data. Once the value is
retrieved, the attribute's value is updated with the returned result.
Note that this method is called directly once Team is invoked and does
not need to be called manually.
Parameters
----------
team_data : string
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string.
"""
for field in self.__dict__:
# The rank attribute is passed directly to the class during
# instantiation.
if field == '_rank' or \
field == '_year':
continue
value = utils._parse_field(PARSING_SCHEME,
team_data,
str(field)[1:])
setattr(self, field, value)
@property
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string abbreviation of the
team, such as 'DET'.
"""
fields_to_include = {
'abbreviation': self.abbreviation,
'assists': self.assists,
'blocks': self.blocks,
'defensive_rebounds': self.defensive_rebounds,
'field_goal_attempts': self.field_goal_attempts,
'field_goal_percentage': self.field_goal_percentage,
'field_goals': self.field_goals,
'free_throw_attempts': self.free_throw_attempts,
'free_throw_percentage': self.free_throw_percentage,
'free_throws': self.free_throws,
'games_played': self.games_played,
'minutes_played': self.minutes_played,
'name': self.name,
'offensive_rebounds': self.offensive_rebounds,
'opp_assists': self.opp_assists,
'opp_blocks': self.opp_blocks,
'opp_defensive_rebounds': self.opp_defensive_rebounds,
'opp_field_goal_attempts': self.opp_field_goal_attempts,
'opp_field_goal_percentage': self.opp_field_goal_percentage,
'opp_field_goals': self.opp_field_goals,
'opp_free_throw_attempts': self.opp_free_throw_attempts,
'opp_free_throw_percentage': self.opp_free_throw_percentage,
'opp_free_throws': self.opp_free_throws,
'opp_offensive_rebounds': self.opp_offensive_rebounds,
'opp_personal_fouls': self.opp_personal_fouls,
'opp_points': self.opp_points,
'opp_steals': self.opp_steals,
'opp_three_point_field_goal_attempts':
self.opp_three_point_field_goal_attempts,
'opp_three_point_field_goal_percentage':
self.opp_three_point_field_goal_percentage,
'opp_three_point_field_goals': self.opp_three_point_field_goals,
'opp_total_rebounds': self.opp_total_rebounds,
'opp_turnovers': self.opp_turnovers,
'opp_two_point_field_goal_attempts':
self.opp_two_point_field_goal_attempts,
'opp_two_point_field_goal_percentage':
self.opp_two_point_field_goal_percentage,
'opp_two_point_field_goals': self.opp_two_point_field_goals,
'personal_fouls': self.personal_fouls,
'points': self.points,
'rank': self.rank,
'steals': self.steals,
'three_point_field_goal_attempts':
self.three_point_field_goal_attempts,
'three_point_field_goal_percentage':
self.three_point_field_goal_percentage,
'three_point_field_goals': self.three_point_field_goals,
'total_rebounds': self.total_rebounds,
'turnovers': self.turnovers,
'two_point_field_goal_attempts':
self.two_point_field_goal_attempts,
'two_point_field_goal_percentage':
self.two_point_field_goal_percentage,
'two_point_field_goals': self.two_point_field_goals
}
return pd.DataFrame([fields_to_include], index=[self._abbreviation])
@int_property_decorator
def rank(self):
"""
Returns an ``int`` of the team's rank based on the number of points
they score per game.
"""
return self._rank
@property
def abbreviation(self):
"""
Returns a ``string`` of the team's abbreviation, such as 'DET' for the
Detroit Pistons.
"""
return self._abbreviation
@property
def schedule(self):
"""
Returns an instance of the Schedule class containing the team's
complete schedule for the season.
"""
return Schedule(self._abbreviation, self._year)
@property
def roster(self):
"""
Returns an instance of the Roster class containing all players for the
team during the season with all career stats.
"""
return Roster(self._abbreviation, self._year)
@property
def name(self):
"""
Returns a ``string`` of the team's full name, such as '<NAME>'.
"""
return self._name
@int_property_decorator
def games_played(self):
"""
Returns an ``int`` of the total number of games the team has played
during the season.
"""
return self._games_played
@int_property_decorator
def minutes_played(self):
"""
Returns an ``int`` of the total number of minutes played by all players
on the team during the season.
"""
return self._minutes_played
@int_property_decorator
def field_goals(self):
"""
Returns an ``int`` of the total number of field goals the team has made
during the season.
"""
return self._field_goals
@int_property_decorator
def field_goal_attempts(self):
"""
Returns an ``int`` of the total number of field goals the team has
attempted during the season.
"""
return self._field_goal_attempts
@float_property_decorator
def field_goal_percentage(self):
"""
Returns a ``float`` of the percentage of field goals made divided by
the number of attempts. Percentage ranges from 0-1.
"""
return self._field_goal_percentage
@int_property_decorator
def three_point_field_goals(self):
"""
Returns an ``int`` of the total number of three point field goals the
team has made during the season.
"""
return self._three_point_field_goals
@int_property_decorator
def three_point_field_goal_attempts(self):
"""
Returns an ``int`` of the total number of three point field goals the
team has attempted during the season.
"""
return self._three_point_field_goal_attempts
@float_property_decorator
def three_point_field_goal_percentage(self):
"""
Returns a ``float`` of the percentage of three point field goals made
divided by the number of attempts. Percentage ranges from 0-1.
"""
return self._three_point_field_goal_percentage
@int_property_decorator
def two_point_field_goals(self):
"""
Returns an ``int`` of the total number of two point field goals the
team has made during the season.
"""
return self._two_point_field_goals
@int_property_decorator
def two_point_field_goal_attempts(self):
"""
Returns an ``int`` of the total number of two point field goals the
team has attempted during the season.
"""
return self._two_point_field_goal_attempts
@float_property_decorator
def two_point_field_goal_percentage(self):
"""
Returns a ``float`` of the percentage of two point field goals made
divided by the number of attempts. Percentage ranges from 0-1.
"""
return self._two_point_field_goal_percentage
@int_property_decorator
def free_throws(self):
"""
Returns an ``int`` of the total number of free throws made during the
season.
"""
return self._free_throws
@int_property_decorator
def free_throw_attempts(self):
"""
Returns an ``int`` of the total number of free throw attempts during
the season.
"""
return self._free_throw_attempts
@float_property_decorator
def free_throw_percentage(self):
"""
Returns a ``float`` of the percentage of free throws made divided by
the attempts. Percentage ranges from 0-1.
"""
return self._free_throw_percentage
@int_property_decorator
def offensive_rebounds(self):
"""
Returns an ``int`` of the total number of offensive rebounds the team
has grabbed.
"""
return self._offensive_rebounds
@int_property_decorator
def defensive_rebounds(self):
"""
Returns an ``int`` of the total number of defensive rebounds the team
has grabbed.
"""
return self._defensive_rebounds
@int_property_decorator
def total_rebounds(self):
"""
Returns an ``int`` of the total number of rebounds the team has
grabbed.
"""
return self._total_rebounds
@int_property_decorator
def assists(self):
"""
Returns an ``int`` of the total number of field goals that were
assisted.
"""
return self._assists
@int_property_decorator
def steals(self):
"""
Returns an ``int`` of the total number of times the team stole the ball
from the opponent.
"""
return self._steals
@int_property_decorator
def blocks(self):
"""
Returns an ``int`` of the total number of times the team blocked an
opponent's shot.
"""
return self._blocks
@int_property_decorator
def turnovers(self):
"""
Returns an ``int`` of the total number of times the team has turned the
ball over.
"""
return self._turnovers
@int_property_decorator
def personal_fouls(self):
"""
Returns an ``int`` of the total number of times the team has fouled an
opponent.
"""
return self._personal_fouls
@int_property_decorator
def points(self):
"""
Returns an ``int`` of the total number of points the team has scored
during the season.
"""
return self._points
@int_property_decorator
def opp_field_goals(self):
"""
Returns an ``int`` of the total number of field goals the opponents
made during the season.
"""
return self._opp_field_goals
@int_property_decorator
def opp_field_goal_attempts(self):
"""
Returns an ``int`` of the total number of field goals the opponents
attempted during the season.
"""
return self._opp_field_goal_attempts
@float_property_decorator
def opp_field_goal_percentage(self):
"""
Returns a ``float`` of the percentage of field goals made divided by
the number of attempts by the opponent. Percentage ranges from 0-1.
"""
return self._opp_field_goal_percentage
@int_property_decorator
def opp_three_point_field_goals(self):
"""
Returns an ``int`` of the total number of three point field goals the
opponent made during the season.
"""
return self._opp_three_point_field_goals
@int_property_decorator
def opp_three_point_field_goal_attempts(self):
"""
Returns an ``int`` of the total number of three point field goals the
opponent attempted during the season.
"""
return self._opp_three_point_field_goal_attempts
@float_property_decorator
def opp_three_point_field_goal_percentage(self):
"""
Returns a ``float`` of the percentage of three point field goals made
divided by the number of attempts by the opponent. Percentage ranges
from 0-1.
"""
return self._opp_three_point_field_goal_percentage
@int_property_decorator
def opp_two_point_field_goals(self):
"""
Returns an ``int`` of the total number of two point field goals the
opponent made during the season.
"""
return self._opp_two_point_field_goals
@int_property_decorator
def opp_two_point_field_goal_attempts(self):
"""
Returns an ``int`` of the total number of two point field goals the
opponent attempted during the season.
"""
return self._opp_two_point_field_goal_attempts
@float_property_decorator
def opp_two_point_field_goal_percentage(self):
"""
Returns a ``float`` of the percentage of two point field goals made
divided by the number of attempts by the opponent. Percentage ranges
from 0-1.
"""
return self._opp_two_point_field_goal_percentage
@int_property_decorator
def opp_free_throws(self):
"""
Returns an ``int`` of the total number of free throws made during the
season by the opponent.
"""
return self._opp_free_throws
@int_property_decorator
def opp_free_throw_attempts(self):
"""
Returns an ``int`` of the total number of free throw attempts during
the season by the opponent.
"""
return self._opp_free_throw_attempts
@float_property_decorator
def opp_free_throw_percentage(self):
"""
Returns a ``float`` of the percentage of free throws made divided by
the attempts by the opponent. Percentage ranges from 0-1.
"""
return self._opp_free_throw_percentage
@int_property_decorator
def opp_offensive_rebounds(self):
"""
Returns an ``int`` of the total number of offensive rebounds the
opponent grabbed.
"""
return self._opp_offensive_rebounds
@int_property_decorator
def opp_defensive_rebounds(self):
"""
Returns an ``int`` of the total number of defensive rebounds the
opponent grabbed.
"""
return self._opp_defensive_rebounds
@int_property_decorator
def opp_total_rebounds(self):
"""
Returns an ``int`` of the total number of rebounds the opponent
grabbed.
"""
return self._opp_total_rebounds
@int_property_decorator
def opp_assists(self):
"""
Returns an ``int`` of the total number of field goals that were
assisted by the opponent.
"""
return self._opp_assists
@int_property_decorator
def opp_steals(self):
"""
Returns an ``int`` of the total number of times the opponent stole the
ball from the team.
"""
return self._opp_steals
@int_property_decorator
def opp_blocks(self):
"""
Returns an ``int`` of the total number of times the opponent blocked
the team's shot.
"""
return self._opp_blocks
@int_property_decorator
def opp_turnovers(self):
"""
Returns an ``int`` of the total number of times the opponent turned the
ball over.
"""
return self._opp_turnovers
@int_property_decorator
def opp_personal_fouls(self):
"""
Returns an ``int`` of the total number of times the opponent fouled the
team.
"""
return self._opp_personal_fouls
@int_property_decorator
def opp_points(self):
"""
Returns an ``int`` of the total number of points the team has been
scored on during the season.
"""
return self._opp_points
class Teams:
"""
A list of all NBA teams and their stats in a given year.
Finds and retrieves a list of all NBA teams from
www.basketball-reference.com and creates a Team instance for every team
that participated in the league in a given year. The Team class comprises
a list of all major stats and a few identifiers for the requested season.
Parameters
----------
year : string (optional)
The requested year to pull stats from.
"""
def __init__(self, year=None):
self._teams = []
team_data_dict, year = _retrieve_all_teams(year)
self._instantiate_teams(team_data_dict, year)
def __getitem__(self, abbreviation):
"""
Return a specified team.
Returns a team's instance in the Teams class as specified by the team's
abbreviation.
Parameters
----------
abbreviation : string
An NBA team's three letter abbreviation (ie. 'DET' for Detroit
Pistons).
Returns
-------
Team instance
If the requested team can be found, its Team instance is returned.
Raises
------
ValueError
If the requested team is not present within the Teams list.
"""
for team in self._teams:
if team.abbreviation.upper() == abbreviation.upper():
return team
raise ValueError('Team abbreviation %s not found' % abbreviation)
def __call__(self, abbreviation):
"""
Return a specified team.
Returns a team's instance in the Teams class as specified by the team's
abbreviation. This method is a wrapper for __getitem__.
Parameters
----------
abbreviation : string
An NBA team's three letter abbreviation (ie. 'DET' for Detroit
Pistons).
Returns
-------
Team instance
If the requested team can be found, its Team instance is returned.
"""
return self.__getitem__(abbreviation)
def __str__(self):
"""
Return the string representation of the class.
"""
teams = [f'{team.name} ({team.abbreviation})'.strip()
for team in self._teams]
return '\n'.join(teams)
def __repr__(self):
"""
Return the string representation of the class.
"""
return self.__str__()
def __iter__(self):
"""Returns an iterator of all of the NBA teams for a given season."""
return iter(self._teams)
def __len__(self):
"""Returns the number of NBA teams for a given season."""
return len(self._teams)
def _instantiate_teams(self, team_data_dict, year):
"""
Create a Team instance for all teams.
Once all team information has been pulled from the various webpages,
create a Team instance for each team and append it to a larger list of
team instances for later use.
Parameters
----------
team_data_dict : dictionary
A ``dictionary`` containing all stats information in HTML format as
well as team rankings, indexed by team abbreviation.
year : string
A ``string`` of the requested year to pull stats from.
"""
if not team_data_dict:
return
for team_data in team_data_dict.values():
team = Team(team_data=team_data['data'],
rank=team_data['rank'],
year=year)
self._teams.append(team)
@property
def dataframes(self):
"""
Returns a pandas DataFrame where each row is a representation of the
Team class. Rows are indexed by the team abbreviation.
"""
frames = []
for team in self.__iter__():
frames.append(team.dataframe)
return pd.concat(frames)
``` |
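A minimal usage sketch for the Team/Teams classes above. The import path follows the sportsreference-style package layout and is an assumption; constructing Teams issues live requests to basketball-reference.com.
```python
# Hypothetical usage; the import path is an assumption and live requests to
# basketball-reference.com are made when Teams() is constructed.
from sportsreference.nba.teams import Teams

teams = Teams('2019')                 # one Team instance per NBA team in 2019
for team in teams:                    # __iter__ yields Team objects
    print(team.name, team.points)

pistons = teams('DET')                # look a team up by abbreviation
print(pistons.dataframe)              # single-row DataFrame indexed by 'DET'
print(teams.dataframes.shape)         # one row per team in the league
```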
{
"source": "JordanDekker/ContainR",
"score": 4
} |
#### File: core/containr/cluster_sequences.py
```python
import pandas as pd
from Bio import Align
from Bio.SubsMat import MatrixInfo
from sklearn.cluster import KMeans
def cluster_seq(main_df, prefix='', num_clusters=3):
"""Clusters the sequences to see similair sequences.
Args:
main_df: The whole pandas dataframe-subset that you want to visualise.
prefix: Optional 'fw_' or 'rv_' prefix if you want to cluster the forward or reverse sequences.
num_clusters: The amount of clusters you want. Default value is 3.
Returns:
main_df: A dataframe sorted by the clusters they're put in.
"""
distance_df = pd.DataFrame(index=main_df.index, columns=main_df.index)
aligner = Align.PairwiseAligner()
aligner.substitution_matrix = MatrixInfo.blosum62
for main_df_row1 in main_df.index:
for main_df_row2 in main_df.index[main_df_row1-1:]:
sequence1 = main_df[prefix+'seq'][main_df_row1]
sequence2 = main_df[prefix+'seq'][main_df_row2]
score = aligner.score(sequence2, sequence1)
distance_df.at[main_df_row1, main_df_row2] = score
distance_df.at[main_df_row2, main_df_row1] = score
km = KMeans(n_clusters=num_clusters, init='k-means++', n_init=10)
km.fit(distance_df)
clusters = km.fit_predict(distance_df)
main_df[prefix+'cluster'] = clusters
main_df = main_df.sort_values(by=[prefix+'cluster'])
return main_df
```
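A small usage sketch for cluster_seq. The import path is inferred from the file layout and the 1-based index mirrors how the function slices `main_df.index[main_df_row1-1:]`; both are assumptions.
```python
# Hypothetical usage; the import path and the 1-based index are assumptions.
import pandas as pd
from app.core.containr.cluster_sequences import cluster_seq

df = pd.DataFrame(
    {"fw_seq": ["ACDEFG", "ACDEFH", "WYWYWY", "WYWYWW"]},
    index=[1, 2, 3, 4],
)
clustered = cluster_seq(df, prefix="fw_", num_clusters=2)
print(clustered[["fw_seq", "fw_cluster"]])   # rows sorted by assigned cluster
```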
#### File: core/utils/preprocess_utils.py
```python
import re
from Bio.Seq import Seq
from app.core.exceptions.exceptions import NonDNAException
ALLOWED_EXTENSIONS = set(['fastq', 'csv', 'tsv', 'xml', 'out', 'zip', 'gz'])
def get_reverse_complement(input_seq):
"""Returns the reverse complement of a sequence.
Args:
input_seq: DNA or RNA sequence, must be a string.
Returns:
String: The reverse complement of input_seq.
Raises:
NonDNAException: If the sequence contains non-DNA characters.
TypeError: If input_seq is not a string.
"""
try:
if check_dna(input_seq.upper()):
return str(Seq(input_seq).reverse_complement())
else:
raise NonDNAException
except TypeError:
raise TypeError
def check_dna(seq, code=re.compile(r'[^ACGTN.]').search):
"""Checks if a sequence consists of DNA-characters.
Args:
seq: Sequence, must be a string.
code: Regex pattern.
Returns:
True: If the sequence is pure DNA.
False: If the sequence if not (pure) DNA.
"""
return not bool(code(seq))
def allowed_file(filename):
"""Checks if a file is allowed by checking the extention.
Args:
filename: String of path to file.
Returns:
True: If the file is allowed.
False: If the file is not allowed.
"""
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
```
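A quick demonstration of the helpers above; the import path is an assumption based on the file layout.
```python
# Expected behaviour of the helpers above (import path is an assumption).
from app.core.utils.preprocess_utils import get_reverse_complement, check_dna, allowed_file

print(check_dna("ACGTN"))              # True
print(check_dna("ACGU"))               # False - 'U' is outside the allowed alphabet
print(get_reverse_complement("ATGC"))  # 'GCAT'
print(allowed_file("reads_R1.fastq"))  # True
print(allowed_file("notes.doc"))       # False - extension not in ALLOWED_EXTENSIONS
```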
#### File: core/utils/to_json.py
```python
import pandas as pd
import numpy as np
import collections
import json
import math
from statistics import median
def seq_length_to_json(df):
"""Convert sequence length distribution to JSON object.
Args:
df: DataFrame containing a subset of sequence length.
Returns:
String: A JSON-formatted string for visualization of sequence length distribution.
"""
json_data = {}
for column in list(df):
json_values = []
distribution = collections.Counter(df.loc[:, column].dropna().tolist())
for key, value in distribution.items():
json_values.append({"x": int(key), "y": value})
json_data[column] = json_values
return json.dumps(json_data)
def perc_count_to_json(df):
"""?"""
df_count = pd.Series.to_frame(df)
df_count.index.names = ["perc"]
return df_count.to_json(orient="table")
def get_paired_percentage_to_json(df):
"""Get a JSON object containing the percentage of True and False paired reads.
Args:
df: Pandas DataFrame.
Returns:
String: A JSON-formatted string.
"""
df = df.loc[df["paired_flag"] == False]
df = df if len(df.index) > 0 else df.loc[df["paired_flag"] == True]
json_paired_data = []
paired_seqs = df.groupby(["paired"]).size()
if paired_seqs.count() < 2:
json_paired_data.append({"name": "True", "y": 100})
json_paired_data.append({"name": "False", "y": 0})
return json.dumps(json_paired_data)
else:
paired_seq_number = paired_seqs.get_values()
true_values = paired_seq_number[1]
false_values = paired_seq_number[0]
total = true_values + false_values
true_values_percentage = round((true_values/total)*100, 3)
false_values_percentage = round((false_values/total)*100, 3)
json_paired_data.append({"name": "True", "y": true_values_percentage})
json_paired_data.append({"name": "False", "y": false_values_percentage})
return json.dumps(json_paired_data)
def nucleotide_percentages_to_json(df):
"""Calculate box plot values from nucleotide percentages.
Args:
df: A pandas dataframe containing a subset of nucleotide percentages.
Returns:
json_data: A JSON-formatted list for visualization of nucleotide percentages.
"""
json_data = []
for pair in enumerate(df.iteritems()):
data = list(pair[1][1].values)
data = sorted(data)
# Calculate quartiles
q1, q3 = np.percentile(data,[25,75])
q2 = median(data)
# Calculate IQR (Interquartile Range)
iqr = q3 - q1
# Define bounds
lower_bound = q1 -(1.5 * iqr)
upper_bound = q3 +(1.5 * iqr)
# Get outliers
outliers = [i for i in data if i < lower_bound or i > upper_bound]
non_outliers = [i for i in data if i >= lower_bound and i <= upper_bound]
# Calculate total reads in every quarter. This includes outliers.
# TODO Think about where to use >= or > and < or <=.
quarter1 = len([i for i in data if i < q1])
quarter2 = len([i for i in data if i >= q1 and i < q2])
quarter3 = len([i for i in data if i >= q2 and i < q3])
quarter4 = len([i for i in data if i >= q3])
# Min and max within bounds, median, Q's.
# The order of these matters for the visualisation in the graph.
box_values = [min(non_outliers), q1, q3, max(non_outliers), q2]
json_data.append({'x':pair[0],'label':pair[1][0],'y':box_values, 'outliers': outliers, 'quarter1': quarter1, 'quarter2': quarter2, 'quarter3': quarter3, 'quarter4': quarter4})
return json_data
```
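The box-plot arithmetic in nucleotide_percentages_to_json is easier to follow on a tiny hand-checked list; the snippet below mirrors the quartile/IQR/outlier steps outside of the DataFrame loop.
```python
# Toy walk-through of the quartile/IQR logic used above.
import numpy as np
from statistics import median

data = sorted([10, 12, 13, 15, 18, 21, 40])
q1, q3 = np.percentile(data, [25, 75])            # 12.5, 19.5
q2 = median(data)                                 # 15
iqr = q3 - q1                                     # 7.0
lower_bound = q1 - (1.5 * iqr)                    # 2.0
upper_bound = q3 + (1.5 * iqr)                    # 30.0
outliers = [i for i in data if i < lower_bound or i > upper_bound]        # [40]
non_outliers = [i for i in data if lower_bound <= i <= upper_bound]
box_values = [min(non_outliers), q1, q3, max(non_outliers), q2]           # [10, 12.5, 19.5, 21, 15]
print(box_values, outliers)
```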
#### File: app/web/routes.py
```python
import datetime
import os
import re
import uuid
from shutil import rmtree
from flask import render_template, redirect, url_for, session, flash, request
from werkzeug.utils import secure_filename
from app.core.preprocessing.parser import preprocess_fastq_files, initialize_empty_dataframe, preprocess_tsv_file, splitTSV
from app.core.utils.preprocess_utils import allowed_file
from app.core.objects.FastQDataframe import FastQDataframe
from app.web import bp
from app.web.forms import FastQForm, TSVForm, XMLForm
#last_purge = None
<EMAIL>_request
#def before_request():
#global last_purge
#now = datetime.datetime.now()
#try:
#if last_purge:
#delta = now - last_purge
#if delta.seconds > 3600:
#for dir in os.listdir(os.path.abspath("data/")):
#if re.search("([a-zA-Z0-9_]+-)+([a-zA-Z0-9_]+)", dir):
#for file in os.listdir(os.path.abspath("data/"+dir)):
#print(os.path.join(os.path.abspath("data/"+dir), file), "automatically removed")
#os.remove(os.path.join(os.path.abspath("data/"+dir), file))
#last_purge = now
#else:
#last_purge = now
#except:
#pass
@bp.route('/')
@bp.route('/index')
def index():
return render_template('index.html')
#Function that will load the preprocess page and collect stats for the page.
@bp.route('/preprocessing', methods = ['GET','POST'])
def preprocessing():
session_id = session['id']
try:
fastq_df = FastQDataframe.load_pickle("data/" + session_id + "/pickle.pkl")
gotBlast = "fw_full_tax" in fastq_df.get_dataframe().columns
except FileNotFoundError:
return render_template('expired.html')
dictStats = {}
fwTotal, rvTotal = fastq_df.getTotalReads()
minLength = fastq_df.getMinLength()
maxLength = fastq_df.getMaxLength()
maxForwardLength = fastq_df.getMaxLength("fw")
maxReverseLength = fastq_df.getMaxLength("rv")
fwMaxQuality, rvMaxQuality = fastq_df.getMaxQuality()
#Fill a python dictionary with stats about the input data.
dictStats.update(fastq_df.get_preprocessOptions())
dictStats.update({"fw_total": fwTotal, "rv_total": rvTotal, "min_length": minLength, "max_length": maxLength, "max_fw_length": maxForwardLength, "max_rv_length": maxReverseLength, "fw_max_quality": fwMaxQuality,
"rv_max_quality": rvMaxQuality
})
jsonOptions = fastq_df.get_originalPreprocessOptionsJSON()
return render_template('preprocessing.html', stats = jsonOptions, stats2 = dictStats, gotBlast = gotBlast)
# Function that provides the buttons to import files and calls functions to process the input
@bp.route('/indexNewSession', methods=['GET', 'POST'])
def indexNewSession():
form = FastQForm() # This variable is used to create the import buttons and their functionality
# Code that is used when the user press the submit button:
if form.validate_on_submit():
# Functions to check if the file type is correct:
fw_file = secure_filename(form.forward_file.data.filename)
rv_file = secure_filename(form.reverse_file.data.filename)
# When the files are of the correct filetype use the following code:
if allowed_file(fw_file) and allowed_file(rv_file):
try:
session_id = get_sessionID()
if os.path.exists('data/'+session_id):
if session.get("id"):
session.pop("id")
session_id = get_sessionID()
finally:
# Rename the files:
renamed_fw_file = 'fw_file.'+fw_file.rsplit('.', 1)[1].lower()
renamed_rc_file = 'rv_file.'+rv_file.rsplit('.', 1)[1].lower()
# Create directory and save the files in it:
if not os.path.exists('data/'+session_id):
try:
os.makedirs('data/'+session_id)
form.forward_file.data.save('data/' + session_id + '/' + renamed_fw_file)
form.reverse_file.data.save('data/' + session_id + '/' + renamed_rc_file)
# Create the FastQDataFrame object with the files and export it as a pickle:
preprocess_fastq_files('data/' + session_id + '/' + renamed_fw_file, 'data/' + session_id + '/' + renamed_rc_file, session_id)
flash('Files were successfully uploaded!')
return redirect(url_for('web.indexNewSession'))
except Exception as e:
if os.path.exists('data/'+session_id):
rmtree('data/'+session_id)
session.clear()
print(e) # print error on server side
flash('An error occurred while parsing the input files, please make sure the '
'files conform to the fastq standard')
return redirect(url_for('web.indexNewSession'))
else:
flash("Files are already uploaded. Please reset the files first")
return redirect(url_for('web.indexNewSession'))
else:
flash('Unsupported file types')
return redirect(url_for('web.indexNewSession'))
return render_template('indexNewSession.html', form=form, scroll = "import")
# Function that allows to upload a TSV file
@bp.route('/indexLoadSession', methods = ['GET','POST'])
def indexLoadSession():
form = TSVForm() # This variable is used to create the import buttons and their functionality
try:
didBlast = request.args['didBlast']
except Exception:
didBlast = None
# Code that is used when the user press the submit button:
if form.validate_on_submit():
# Functions to check if the file type is correct:
tsvfile = secure_filename(form.tsv_file.data.filename)
# When the files are of the correct filetype use the following code:
if allowed_file(tsvfile):
try:
session_id = get_sessionID()
if os.path.exists('data/'+session_id):
if session.get("id"):
session.pop("id")
session_id = get_sessionID()
finally:
# Rename the files:
renamed_tsv_file = 'data/' + session_id + '/' + 'tsv_file.'+tsvfile.rsplit('.', 1)[1].lower()
# Create directory and save the files in it:
if not os.path.exists('data/'+session_id):
try:
os.makedirs('data/'+session_id)
form.tsv_file.data.save(renamed_tsv_file)
# Create the FastQDataFrame object with the files and export it as a pickle:
didBlast = preprocess_tsv_file(renamed_tsv_file, session_id)
flash('Files were successfully uploaded!')
return redirect(url_for('web.indexLoadSession', didBlast = didBlast))
except Exception as e:
if os.path.exists('data/'+session_id):
rmtree('data/'+session_id)
session.clear()
print(e) # print error on server side
flash(e)
return redirect(url_for('web.indexLoadSession'))
else:
flash("Files are already uploaded. Please reset the files first")
return redirect(url_for('web.indexNewSession'))
else:
flash('Unsupported file types')
return redirect(url_for('web.indexLoadSession'))
return render_template('indexLoadSession.html', form=form, scroll = "import", didBlast = didBlast)
def get_sessionID():
try:
session_id = session['id'] # Session_id is the key to know which directory the files need to be saved to
except KeyError:
session_id = str(uuid.uuid1()) # Create an unique ID code
session['id'] = session_id
return session_id
@bp.route('/blastPage', methods=['GET', 'POST'])
def blastPage():
#get the desired form class
form = XMLForm()
#check if the form has anything in it
if form.validate_on_submit():
#get the file name
xml_file = secure_filename(form.xml_file.data.filename)
#check if the extension is allowed
if allowed_file(xml_file):
#try to get the session else make one
try:
session_id = session['id']
except:
session_id = str(uuid.uuid1())
session['id'] = session_id
finally:
#check if the path and a pickle exist, else make the dir and an empty pickle
if not os.path.exists('data/'+session_id):
os.makedirs('data/'+session_id)
if not os.path.exists('data/'+session_id+'/'+'pickle.pkl'):
initialize_empty_dataframe(session_id)
#add the xml file to the session dir
renamed_fw_file = 'blastout.xml'
form.xml_file.data.save('data/' + session_id + '/' + renamed_fw_file)
flash('Files were successfully uploaded!')
return redirect(url_for('web.blastPage', upload=True, haveResults = False))
else:
flash('Unsupported file types')
return redirect(url_for('web.blastPage', upload=False, haveResults = False))
df = FastQDataframe.load_pickle("data/" + session['id'] + "/pickle.pkl")
return render_template('blastPage.html', form=form, upload=False, haveResults = str("fw_full_tax" in df.get_dataframe().columns))
@bp.route('/results', methods=['GET', 'POST'])
def results():
return render_template('results.html')
@bp.route('/about')
def about():
return render_template('about.html')
``` |
{
"source": "jordandelaney/microblog",
"score": 2
} |
#### File: microblog/accounts/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404, Http404
from django.contrib.auth import login
from django.contrib.auth import get_user_model
from .models import CustomUser
from .forms import CustomUserCreationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
# Get custom user model
User = get_user_model()
def register(request):
"""Register a new user"""
if request.user.is_authenticated:
raise Http404
if request.method != 'POST':
#Display blank registration form
form = CustomUserCreationForm()
else:
#Process completed form
form = CustomUserCreationForm(data=request.POST)
if form.is_valid():
new_user = form.save()
#Log the user in and then redirect to home page.
login(request, new_user)
return redirect('blog:index')
#Display a blank or invalid form
context = {'form': form}
return render(request, 'registration/register.html', context)
@login_required
def profile(request):
"""Display a page with user profile info"""
user = get_object_or_404(CustomUser, pk=request.user.pk)
context = {
'user': user
}
return render(request, 'accounts/profile.html', context)
``` |
{
"source": "jordan-dimov/stinky-games",
"score": 4
} |
#### File: stinkies/game1/game.py
```python
from random import choice, shuffle
from game1.words import adjectives, things
def pick_random_thing():
return " ".join((choice(adjectives), choice(things)))
class Bag:
def __init__(self, name):
self.name = name
self.contents = []
self.coins = 0
def load_random_things(self, n=1):
for i in range(n):
self.contents.append(pick_random_thing())
self.coins += 2
def __str__(self):
contents = ", ".join(self.contents)
return "{} ({})\n{}".format(self.name, self.coins, contents)
def __repr__(self):
return str(self)
def trade(self, bag, thing=None):
old_thing = thing or choice(self.contents)
self.contents.remove(old_thing)
new_thing = choice(bag.contents)
bag.contents.remove(new_thing)
self.contents.append(new_thing)
bag.contents.append(old_thing)
return (old_thing, new_thing)
def buy_new_thing(self):
if self.coins >= 2:
self.contents.append(pick_random_thing())
self.coins -= 2
def buy_from(self, bag, thing, coins=2):
if self.coins >= coins:
bag.contents.remove(thing)
self.contents.append(thing)
self.coins -= coins
def sell_back(self, thing):
self.contents.remove(thing)
self.coins += 1
michelle = Bag("Michelle")
michelle.load_random_things(n=6)
bea = Bag("Bea")
bea.load_random_things(n=6)
daddy = Bag("Daddy")
daddy.load_random_things(n=6)
mummy = Bag("Mum")
mummy.load_random_things(n=6)
```
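A short interactive-style sketch of the Bag class defined above; the import path follows the file header.
```python
# Small sketch of the trading loop; the import path follows the file header.
from game1.game import Bag

alice = Bag("Alice")
alice.load_random_things(n=3)       # three random "adjective thing" items, +6 coins
bob = Bag("Bob")
bob.load_random_things(n=3)

given, received = alice.trade(bob)  # swap one random item between the bags
alice.buy_new_thing()               # spend 2 coins on a fresh random item
alice.sell_back(received)           # sell the traded-in item back for 1 coin
print(alice)
print(bob)
```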
#### File: stinkies/game1/models.py
```python
from django.contrib.auth.models import User
from django.db import models
from django_extensions.db.models import TimeStampedModel
DEFAULT_NEW_PLAYER_COINS = 12
DEFAULT_NEW_ITEM_COST = 2
class Stinky(models.Model):
name = models.CharField(max_length=255, unique=True)
price = models.PositiveIntegerField(default=DEFAULT_NEW_ITEM_COST)
def __str__(self):
return self.name
class Meta:
verbose_name = "stinky"
verbose_name_plural = "stinkies"
class InventoryItem(TimeStampedModel):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="inventory")
item = models.ForeignKey(Stinky, on_delete=models.CASCADE)
bought_for = models.PositiveIntegerField(blank=True, null=True)
def __str__(self):
return str(self.item)
def is_for_trade(self):
return self.item.for_trade.first()
class Player(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
coins = models.PositiveIntegerField(default=DEFAULT_NEW_PLAYER_COINS)
def __str__(self):
return "{}".format(self.user)
class WantsToTrade(TimeStampedModel):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="wants_to_trade")
item = models.ForeignKey(Stinky, on_delete=models.CASCADE, related_name='for_trade')
```
#### File: stinkies/game1/services.py
```python
from django.db import transaction as db_transaction
from game1.game import pick_random_thing
from game1.models import Stinky, InventoryItem
def get_a_stinky():
stinky_name = pick_random_thing()
stinky, created = Stinky.objects.get_or_create(name=stinky_name)
return stinky
@db_transaction.atomic
def buy_a_stinky(player):
stinky = get_a_stinky()
if player.coins >= stinky.price:
item = InventoryItem.objects.create(
user=player.user,
item=stinky,
bought_for=stinky.price,
)
player.coins -= stinky.price
player.save()
return item
return None
@db_transaction.atomic
def sell_back_stinky(player, inventory_item):
sell_price = max(1, round((inventory_item.bought_for or 0) / 2))
inventory_item.delete()
player.coins += sell_price
player.save()
return sell_price
```
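sell_back_stinky refunds half of what the item was bought for, rounded and never less than one coin; the rule can be checked in isolation, without touching the Django models.
```python
# The refund rule from sell_back_stinky, checked without touching the database.
def refund(bought_for):
    return max(1, round((bought_for or 0) / 2))

assert refund(2) == 1
assert refund(5) == 2      # round(2.5) rounds to the even neighbour in Python 3
assert refund(None) == 1   # items with no recorded price still return one coin
```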
#### File: stinkies/portal/views.py
```python
from django.contrib import messages
from django.shortcuts import render
from django.utils.safestring import mark_safe
from friendship.models import Friend
def homepage_view(request):
context = {
}
if request.user.is_authenticated:
friendship_requests = Friend.objects.requests(request.user)
for frq in friendship_requests:
msg = "{0} wants to be friends with you! <a href='#' id='frq_accept' onclick='accept_frq(\"{1}\");'>Accept</a> | <a id='frq_reject' href='#'>Reject</a>".format(frq.from_user, frq.id)
messages.info(request, mark_safe(msg))
context['frqs'] = friendship_requests
return render(request, "portal/homepage.html", context=context)
``` |
{
"source": "jordandjp/fimage",
"score": 2
} |
#### File: fimage/fimage/converters.py
```python
import numpy as np
def rgb2hsv(rgb: np.ndarray) -> np.ndarray:
axis = rgb.ndim - 1
rgb = rgb.astype(np.float64) / 255
r = rgb[..., 0]
g = rgb[..., 1]
b = rgb[..., 2]
max_array = rgb.max(axis=axis)
min_array = rgb.min(axis=axis)
v = max_array
d = max_array - min_array
old_settings = np.seterr(invalid="ignore")
d[d == 0] = 0
s = np.where(max_array == 0, 0, d / max_array)
h = np.zeros_like(s)
np.putmask(
h,
max_array == r,
((g - b) / d) + np.where(g < b, 6, 0).astype(np.float64),
)
np.putmask(h, max_array == g, (b - r) / d + 2)
np.putmask(h, max_array == b, (r - g) / d + 4)
np.seterr(**old_settings)
h = h / 6
hsv = np.empty_like(rgb)
hsv[..., 0] = h
hsv[..., 1] = s
hsv[..., 2] = v
hsv[np.isnan(hsv)] = 0
return hsv
def hsv2rgb(hsv: np.ndarray) -> np.ndarray:
input_shape = hsv.shape
hsv = hsv.reshape(-1, 3)
h, s, v = hsv[:, 0], hsv[:, 1], hsv[:, 2]
i = np.int32(h * 6.0)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
rgb = np.zeros_like(hsv)
v = v.reshape(-1, 1)
t = t.reshape(-1, 1)
p = p.reshape(-1, 1)
q = q.reshape(-1, 1)
rgb[i == 0] = np.hstack([v, t, p])[i == 0]
rgb[i == 1] = np.hstack([q, v, p])[i == 1]
rgb[i == 2] = np.hstack([p, v, t])[i == 2]
rgb[i == 3] = np.hstack([p, q, v])[i == 3]
rgb[i == 4] = np.hstack([t, p, v])[i == 4]
rgb[i == 5] = np.hstack([v, p, q])[i == 5]
rgb[s == 0.0] = np.hstack([v, v, v])[s == 0.0]
return rgb.reshape(input_shape)
``` |
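A round-trip sanity check of the two converters: rgb2hsv expects 0-255 RGB input and returns all three HSV channels scaled to 0-1, so the output of hsv2rgb has to be scaled back up. The import path follows the package layout shown above.
```python
# Round-trip check (import path follows the file layout shown above).
import numpy as np
from fimage.converters import rgb2hsv, hsv2rgb

rgb = np.array([[[255, 0, 0], [0, 128, 255], [40, 40, 40]]], dtype=np.uint8)
hsv = rgb2hsv(rgb)                 # all channels in the 0-1 range
rgb_back = hsv2rgb(hsv) * 255      # hsv2rgb also returns 0-1 floats
print(np.allclose(rgb, rgb_back, atol=1))   # True
```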
{
"source": "jordandll/squid",
"score": 3
} |
#### File: build/lib.linux-armv6l-2.7/button.py
```python
import RPi.GPIO as GPIO
import time
class Button:
BUTTON_PIN = 0
DEBOUNCE = 0
def __init__(self, button_pin, debounce=0.05):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self.BUTTON_PIN = button_pin
self.DEBOUNCE = debounce
print(debounce)
GPIO.setup(self.BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def is_pressed(self):
now = time.time()
if GPIO.input(self.BUTTON_PIN) == False:
time.sleep(self.DEBOUNCE)
# wait for button release
while not GPIO.input(self.BUTTON_PIN):
pass
return True
return False
```
#### File: jordandll/squid/squid.py
```python
import RPi.GPIO as GPIO
import time
WHITE = [30, 30, 30]
OFF = [0, 0, 0]
RED = [100, 0, 0]
GREEN = [0, 100, 0]
BLUE = [0, 0, 100]
YELLOW = [50, 50, 0]
PURPLE = [50, 0, 50]
CYAN = [0, 50, 50]
class Squid:
RED_PIN = 0
GREEN_PIN = 0
BLUE_PIN = 0
red_pwm = 0
green_pwm = 0
blue_pwm = 0
def __init__(self, red_pin, green_pin, blue_pin):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self.RED_PIN, self.GREEN_PIN, self.BLUE_PIN = red_pin, green_pin, blue_pin
GPIO.setup(self.RED_PIN, GPIO.OUT)
self.red_pwm = GPIO.PWM(self.RED_PIN, 500)
self.red_pwm.start(0)
GPIO.setup(self.GREEN_PIN, GPIO.OUT)
self.green_pwm = GPIO.PWM(self.GREEN_PIN, 500)
self.green_pwm.start(0)
GPIO.setup(self.BLUE_PIN, GPIO.OUT)
self.blue_pwm = GPIO.PWM(self.BLUE_PIN, 500)
self.blue_pwm.start(0)
def set_red(self, brightness):
self.red_pwm.ChangeDutyCycle(brightness)
def set_green(self, brightness):
self.green_pwm.ChangeDutyCycle(brightness)
def set_blue(self, brightness):
self.blue_pwm.ChangeDutyCycle(brightness)
def set_color(self, rgb, brightness = 100):
self.set_red(rgb[0] * brightness / 100)
self.set_green(rgb[1] * brightness / 100)
self.set_blue(rgb[2] * brightness / 100)
def set_color_rgb(self, rgb_string):
self.set_red(int(rgb_string[1:3], 16) / 255.0)
self.set_green(int(rgb_string[3:5], 16) / 255.0)
self.set_blue(int(rgb_string[5:7], 16) / 255.0)
``` |
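Typical use on a Raspberry Pi; the BCM pin numbers below are only an example and must match how the RGB LED is actually wired.
```python
# Example wiring; the GPIO pin numbers (BCM numbering) are assumptions.
from squid import Squid, RED, GREEN, BLUE

led = Squid(18, 23, 24)      # red, green, blue pins
led.set_color(RED)           # full red
led.set_color(GREEN, 50)     # green at half brightness
led.set_color(BLUE)
```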
{
"source": "jordandll/XMath",
"score": 4
} |
#### File: jordandll/XMath/polynomial.py
```python
from math import sqrt, factorial
from collections.abc import Sequence
from collections import namedtuple
from numbers import Number
def deg2 (a, b, c):
""" Generate a Degree 2 Polynomial
Returns a function, denoted as 'f(x | a, b, c)=ax^2+bx+c', where 'a', 'b', and 'c' are equal to the arguments passed to their respective parameters in this function."""
return lambda x: a*x**2 + b*x + c
def degN (*args):
""" Generate a Degree N Polynomial
Returns a function, denoted as 'f(x | a_0, a_1, ... , a_i, ... , a_N)= a_N*x^N + ... +a_i*x^i + ... + a_1*x + a_0, where N=len(args)-1.
The elements in 'args' equal the coefficients of their corresponding term in the function, 'f'; And the index of each element in 'args' is equal to the
exponent of the variable, 'x', in it's corresponding term in 'f'.
Example: An argument list of [5, 1, 2] will result in the function, f(x) = 2x^2 + x + 5, being returned.
"""
return lambda x: sum(a*x**i for i, a in enumerate(args))
def extremum (a, b, c) -> tuple:
""" Returns the (x, y) coordinates of the extremum of the curve given by the polynomial, f(x | a, b, c).
The extremum can refer to either a maximum or minimum value. When 'a' is negative, the max or top of the curve is returned. Otherwise, the min is returned.
The value of the x-coordinate can be thought of as the midpoint of the curve."""
# Check params.
if a == 0:
raise ValueError('Argument Value Error:\tThe parameter, \'a\', in a polynomial, f(x) = ax^2 + bx + c, cannot be equal to zero.')
x = -b/(2*a)
return (x, a*x**2 + b*x + c)
def roots (a, b, c) -> set:
""" Find the Roots of a Quadratic Polynomial
Returns the set of values that satisfies the equation, ax^2 + bx + c = 0. """
# Check for argument errors.
if a == 0: raise ValueError('Argument Value Error:\tThe parameter, \'a\', in a polynomial, f(x) = ax^2 + bx + c, cannot be equal to zero.')
t = b**2 - 4*a*c
if b == 0 and c == 0: res = {0}
elif t < 0: res = set()
else:
mp = -b/(2*a)
delta = sqrt(t)/(2*a)
res = {mp+delta, mp-delta}
return res
def add (p1, p2):
""" Adds the two Polynomials, 'p1' and 'p2'
The arguments can be a sequence of coefficients or an instance of the Polynomial class. """
res = [x[0] + x[1] for x in zip(p1, p2)]
n = len(res)
res.extend(max(p1, p2, key=len)[n:])
return res
def sub (p1, p2):
""" Subtracts the two Polynomials, 'p1' and 'p2'
The arguments can be a sequence of coefficients or an instance of the Polynomial class. """
res = [x[0] - x[1] for x in zip(p1, p2)]
n = len(res)
T = max(p1, p2, key=len)[n:]
res.extend((-t for t in T) if len(p2) > len(p1) else T)
return res
class Polynomial:
""" The base class for all Polynomials """
def __init__ (self, *args):
__doc__ = degN.__doc__
if args[-1] == 0: raise ValueError('Argument Value Error:\tThe leading coefficient of a polynomial cannot be equal to zero.')
self.coefficients = tuple(args)
self.f = degN(*self.coefficients)
@staticmethod
def from_critical_points(*args):
""" Initialize a polynomial from a set of critical points, denoted as 'CP', instead of a collection of coefficients(the default method of initialization).
Notes:
1.) Each argument should be a pair of (x, y) coordinates for each critical point.
2.) Said pair can be in the form of a builtin python 2-tuple or some other indexable or subscriptable collection such that for all arguments, 'a',
in 'CP', a[0] returns the x-coordinate of the critical point and a[1] returns the y-coordinate.
3.) 'CP' should be ordered under magnitude in ascending order with respect to the x-coordinates -- i.e. CP[0].x < CP[1].x < ... < CP[-1].x."""
# The number of terms and coefficients in the resulting polynomial.
n = len(args) + 2
# Initialize a list of coefficients
h = [0 for i in range(n)]
# Define a named pair of x,y-coordinates as a 'Point'.
Point = namedtuple('Point', ['x', 'y'])
# Convert the list of arguments to a list of Points.
CP = [Point(cp_x, cp_y) for cp_x, cp_y in args]
# Coefficients found from the x,y-coordinates of every critical point.
#for i in range(len(CP)):
# h[i] =
def __call__ (self, x = None):
""" This method behaves differently depending on the argument type of 'x'.
1.) When 'x' is a Number, it returns f(x), where 'f' is the underlying function of this Polynomial.
2.) When 'x' is a Polynomial or a Sequence of coefficients it returns the resulting Polynomial of performing function composition on this Polynomial and 'x'.
3.) When 'x' is None, it returns this Polynomial, which is useful when performing function composition. For example, 'P1(P2())', will perform function composition on
the Polynomials, P1 and P2, and return the resulting Polynomial."""
if isinstance(x, Number): return self.f(x)
elif isinstance(x, Polynomial):
# Perform function composition and return a new Polynomial.
# TODO: Implement this part of the method.
raise NotImplementedError()
elif isinstance(x, Sequence):
# Perform function composition and return a new Polynomial.
# TODO: Implement this part of the method.
raise NotImplementedError()
elif x == None: return self
else: raise TypeError(f'Argument Type Error:\t{__name__} only takes instances of Numbers, Polynomials, and Sequences as arguments.')
def degree(self) -> int: return len(self.coefficients)-1
def __len__(self) -> int:
return len(self.coefficients)
def __getitem__ (self, i: int):
""" Get the coefficient at index 'i'. Note that 'i' equals the exponent of the variable, 'x', in the corresponding term of the polynomial."""
return self.coefficients[i]
def __add__ (self, rhs):
__doc__ = add.__doc__
if isinstance(rhs, Polynomial): res = Polynomial(*add(self.coefficients, rhs.coefficients))
elif isinstance(rhs, Sequence): res = Polynomial(*add(self.coefficients, rhs))
else: raise TypeError('Argument Type Error:\tOnly a Polynomial or a sequence of coefficients can be added to a Polynomial.')
return res
def __sub__ (self, rhs):
__doc__ = add.__doc__
if isinstance(rhs, Polynomial): res = Polynomial(*sub(self.coefficients, rhs.coefficients))
elif isinstance(rhs, Sequence): res = Polynomial(*sub(self.coefficients, rhs))
else: raise TypeError('Argument Type Error:\tOnly a Polynomial or a sequence of coefficients can be subtracted from a Polynomial.')
return res
def __mul__ (self, rhs):
""" Multiply two Polynomials """
if isinstance(rhs, Polynomial):
# Add the degrees to find the degree, 'n', of the resulting polynomial. Then initialize a list, denoted as 'res', of coefficients for said polynomial.
n = self.degree() + rhs.degree()
res = [0 for i in range(n+1)]
# Distribute the right hand side, denoted as 'rhs', polynomial to the left hand side, 'lhs', polynomial -- or 'self' -- and then perform polynomial addition.
for idx_lhs, val_lhs in enumerate(self.coefficients):
for idx_rhs, val_rhs in enumerate(rhs.coefficients):
res[idx_lhs + idx_rhs] += val_lhs * val_rhs
else:
raise TypeError('Argument Type Error:\tOnly a Polynomial or a sequence of coefficients can be added to a Polynomial.')
return Polynomial(*res)
def __pow__(self, n: int):
""" Raise this polynomial to the n'th power, where 'n' is a non-negative whole number. """
from itertools import product as cartesian_product
# Check 'n'.
if n < 0:
raise ValueError('Argument Value Error:\tRaising a polynomial to a negative power is prohibited (at least for now).')
elif n > 1:
deg1 = self.degree() * n
C = [0 for i in range(deg1 + 1)]
for T in cartesian_product(range(len(self.coefficients)), repeat=n):
p = sum(T)
c = 1
for t in T: c *= self.coefficients[t]
C[p] += c
return Polynomial(*C)
elif n == 0: return 1
else: return Polynomial(*self.coefficients)
def derivative (self, n: int = 1):
""" Perform the derivative function on this polynomial 'n' times.
Returns d^n(f)/(dx)^n, where 0 < n <= degree(f) and f(x) is this polynomial."""
if n < 1: raise ValueError('Argument Value Error:\tThe power of the derivative function must be a positive integer.')
elif n > self.degree(): raise ValueError('Argument Value Error:\tThe power of the derivative function cannot be greater than the degree of it\'s polynomial argument.')
return Polynomial(*tuple(c * factorial(i) / factorial(i-n) for i, c in enumerate(self.coefficients[n:], n)))
def anti_derivative(self, n: int =1):
"""Perform the anti-derivative, sometimes known as the primitive integral, function on this polynomial 'n' times, where n > 0."""
# Check parameter.
if n < 1: raise ValueError('Argument Value Error:\tThe power of the derivative function must be a positive integer.')
C = tuple(c * factorial(i) / factorial(i+n) for i, c in enumerate(self.coefficients))
return tuple(0 for i in range(n)) + C
class Linear (Polynomial):
""" Linear Polynomial
A linear polynomial is of the form:
f(x | a, b) := ax + b, where a != 0."""
def __init__(self, a, b):
""" Initialize a linear polynomial of the form:
f(x | a, b) := ax + b, where a != 0. """
if a == 0: raise ValueError('Argument Value Error:\tThe leading coefficient of a polynomial cannot be equal to zero.')
Polynomial.__init__(self, b, a)
b, a = self.coefficients
self._root = -b / a
@staticmethod
def from_kw_args(a, b):
"""Initialize a linear polynomial from keyword arguments. Said polynomial has the form:
f(x | a, b) := ax + b, where a != 0"""
return Linear(a, b)
@staticmethod
def from_pos_args(*args):
""" Initialize a linear polynomial of the form:
f(x | a, b) := ax + b, where a != 0.
Note that 'b' = args[0] and 'a' = args[1]."""
if len(args) != 2: raise IndexError('Argument Count Error:\tA linear polynomial must be initialized with exactly 2 coefficients.')
return Linear(a=args[1], b=args[0])
#@staticmethod
# from_tan_points = find_cubic_linearity
@property
def root(self):
return self._root
def _binom(self, n: int):
""" Invoke binomial theorem to raise this polynomial to the n'th power, where 'n' is a non-negative whole number."""
if n < 0: raise ValueError('Argument Value Error:\tRaising a polynomial to a negative power is prohibited (atleast for now).')
from num_theory import binomial_coefficient as bc
deg = n
b, a = self.coefficients
return Polynomial(*reversed(tuple(bc(n,m)*a**(n-m)*b**(m) for m in range(deg + 1))))
__pow__ = _binom
class Quadratic (Polynomial):
""" Quadratic Polynomial
A quadratic polynomial is of the form: f(x | a, b, c) := ax^2 + bx + c, where a != 0."""
def __init__ (self, a, b, c):
""" Initialize a Quadtratic Polynomial of the form:
f(x | a, b, c) := ax^2 + bx + c, where a != 0."""
# Check parameters.
if a == 0: raise ValueError('Argument Value Error:\tThe leading coefficient of a polynomial cannot be equal to zero.')
Polynomial.__init__(self, c, b, a)
self._roots = roots(a, b, c)
self._extremum = extremum(a, b, c)
@staticmethod
def from_pos_args(*args):
if len(args) != 3: raise IndexError('Argument Count Error:\tA quadratic or degree two polynomial must be initialized with exactly three coefficients.')
return Quadratic(args[2], args[1], args[0])
@staticmethod
def from_props(ex_x, ex_y, w, y = 0):
""" Create a quadratic polynomial from a set of properties instead of coefficients.
The following properties of the parabola given by the polynomial to be created are used:
1.) (ex_x, ex_y): The x, y coordinates of the extremum. This is the point where the slope of the curve is zero.
2.) w(y): The width of the parabola at any valid y-coordinate, ‘y’."""
a = (4*y - 4*ex_y) / (w**2)
b = -2*a*ex_x
c = ex_y + a * ex_x**2
return Quadratic(a, b, c)
@staticmethod
def from_tan_points(f, Ix: float):
"""Let the argument, 'f', be a cubic polynomial and 'g' be defined as a quadratic polynomial of the form:
g(x | a, b, c) := ax^2 + bx + c, where a != 0.
Suppose the curves given by ‘f’ and ‘g’ intersect at a point denoted as 'I = (Ix, Iy)', and the coefficients of ‘f’ are known, while the coefficients of ‘g’ are not known;
And the only other point of incidence between ‘f’ and ‘g’ is a tangential one. With this information it is possible to find the coefficients of ‘g’.
This method will initialize a Quadratic polynomial with said coefficients.
"""
# Check arguments.
# Solve for coefficients of 'g'.
g2 = (Ix*f[2]*f[3]) / (Ix*f[3] + f[2])
g1 = f[3]*Ix**2 + Ix*f[2] - Ix*g2 + f[1] - (Ix*f[3]+f[2]-g2)**2 / (4*f[3])
g0 = f(Ix) - Ix*g1 - g2*Ix**2
return Quadratic(g2, g1, g0)
@property
def roots(self):
__doc__ = roots.__doc__
return self._roots
@property
def extremum (self):
__doc__ = extremum.__doc__
return self._extremum
def width (self, y = 0):
""" Caclulates the width of the parabola given by the quadratic polynomial at the y-coordinate, 'y'.
When 'y' equals zero, the distance between the two roots (if they exist) is returned.
For example, width(0) = max(R) - min(R), where 'R' is the set of the roots of the polynomial.
Zero is returned if only one root exists, and an exception is raised if none exist."""
if self.coefficients[2] < 0 and y > self.extremum[1]: raise ValueError('Argument Value Error:\tWhen the leading coefficient of a quadratic polynomial, \
\'f\', is negative, \'y\' cannot be greater than the max(f).')
elif self.coefficients[2] > 0 and y < self.extremum[1]: raise ValueError('Argument Value Error:\tWhen the leading coefficient of a quadratic polynomial, \
\'f\', is positive, \'y\' cannot be less than the min(f).')
elif y == self.extremum[1]: return 0
a = self.coefficients[2]
b = self.coefficients[1]
c = self.coefficients[0]
return abs(sqrt(b**2 + 4*a*(y-c)) / a)
class Cubic (Polynomial):
"""A cubic polynomial is a third degree polynomial of the form:
f(x | a, b, c, d) := ax^3 + bx^2 + cx + d, where a != 0."""
def __init__(self, *args):
if len(args) != 4: raise IndexError('Argument Count Error:\tA cubic or 3rd degree polynomial must be initialized with exactly four coefficients, even if they are \
equal to zero.')
Polynomial.__init__(self, *args)
self._critical_points = self._find_critical_pnts()
self._inflection_pnt = self._find_inflection_pnt()
@staticmethod
def from_props(cp0x, cp0y, cp1x, cp1y):
"""Initialize a Cubic Polynomial from the x,y coordinates of a pair of critical points.
This is very useful in interpolation. See /doc/CubicFromProps for more info.
Parameters:
'cp0x' is the x-coordinate of the zero'th critical point.
'cp' stands for and denotes 'critical point';
'0' equals the index of the critical point;
And 'x' is the axis of the coordinate."""
# Calculate the x-coordinate of inflection point, denoted as IPx.
IPx = (cp1x + cp0x)/2
# Calculate the leading coefficient, 'a', and then the remaining coefficients.
a = (cp1y - cp0y) / (cp1x**3 - cp0x**3 - 3*IPx*cp1x**2 + 3*cp0x*cp1x**2 - 3*cp1x*cp0x**2 + 3*IPx*cp0x**2)
b = -3*a*IPx
c = 3*a*cp0x*cp1x
d = cp0y - a*cp0x**3 - 3*a*cp1x*cp0x**2 + 3*a*IPx*cp0x**2
return Cubic(d, c, b, a)
@property
def critical_points(self):
return self._critical_points
@property
def inflection_point(self):
return self._inflection_pnt
def _find_critical_pnts(self):
""" A 'private' method to find the critical points of the curve given by this polynomial.
Returns an n-tuple of x,y coordinates, where 'n' is the number of roots in the derivative of this polynomial and 'n' in {0, 1, 2}."""
# Find the roots of the derivative, denoted as 'p2'. These are the x-coordinates of each critical point.
df = self.derivative()
p2 = Quadratic(df.coefficients[2], df.coefficients[1], df.coefficients[0])
return tuple((r, self.f(r)) for r in p2.roots)
def _find_inflection_pnt(self):
d2f = self.derivative(2)
p1 = Linear.from_pos_args(*d2f.coefficients)
return (p1.root, self.f(p1.root))
def find_cubic_linearity(f, Ix: float):
"""Let the argument, 'f', be a cubic polynomial and 'l' be defined as a linear polynomial of the form:
l(x | m, s) := mx + s, where m != 0.
Suppose that 'l' intersects 'f' at a point with an x-coordinate that is equal to the argument, 'Ix'.
As long as the point of intersection, ‘I’, is not equal to the inflection point of ‘f’, ‘IP’,
it is possible to find the coefficients of 'l' such that there is exactly one other point of incidence between ‘f’ and ‘l’ and ‘l’ is tangential to ‘f’ at that point.
If 'I == IP' then the only point at which ‘l’ is tangential to ‘f’ is the inflection point; In that case ‘f’ minus ‘l’ is a complete cube of the form, '(ax+b)^3',
where a != 0.
This function will find the coefficients of 'l' and return them in the form of an instance of the polynomial.Linear class.
See doc/CubicLinearities.odt for more info."""
# Check args.
if isinstance(f, Polynomial) == False: raise TypeError(f'Argument Type Error:\tOnly instances of the Polynomial class may be passed to the \'f\' parameter of {__name__}.')
if len(f) != 4: raise ValueError(f'Only degree 3 or cubic polynomials may be passed to \'f\'.')
# Definitions
d, c, b, a = f.coefficients
p = Ix
# 'C' holds the coefficients of the quotient of dividing 'f-l' by 'x-p', excluding the leading coefficient, which requires 'm' to be known.
C = (a, a*p+b)
# Solve for 'm' and 's'.
m = a*p**2 + p*b + c - C[1]**2 / (4*C[0])
s = f(p) - m*p
return Linear(m, s)
``` |
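A few quick checks of the classes above; the module name follows this file.
```python
# Quick checks of the classes defined above (module name follows this file).
from polynomial import Polynomial, Quadratic, Cubic

q = Quadratic(1, -3, 2)             # f(x) = x^2 - 3x + 2
print(q.roots)                      # {1.0, 2.0}
print(q.extremum)                   # (1.5, -0.25)

p = Polynomial(2, 1, 3)             # 3x^2 + x + 2, coefficients in ascending order
print(p(2))                         # 16
print(p.derivative().coefficients)  # (1.0, 6.0)

c = Cubic.from_props(0, 0, 2, 4)    # critical points at (0, 0) and (2, 4)
print(c.critical_points)            # ((0.0, 0.0), (2.0, 4.0)) in either order
print(c.inflection_point)           # (1.0, 2.0)
```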
{
"source": "jordandukart/islandora_workbench",
"score": 2
} |
#### File: jordandukart/islandora_workbench/workbench_utils.py
```python
import os
import sys
import json
import csv
import openpyxl
import time
import string
import re
import copy
import logging
import datetime
import requests
import subprocess
import hashlib
import mimetypes
import collections
import urllib.parse
import magic
from pathlib import Path
from ruamel.yaml import YAML, YAMLError
from functools import lru_cache
import shutil
yaml = YAML()
def set_config_defaults(args):
"""Convert the YAML configuration data into an array for easy use.
Also set some sensible default config values.
"""
# Check existence of configuration file.
if not os.path.exists(args.config):
# Since the main logger gets its log file location from this file, we
# need to define a local logger to write to the default log file location,
# 'workbench.log'.
logging.basicConfig(
filename='workbench.log',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
message = 'Error: Configuration file "' + args.config + '" not found.'
logging.error(message)
sys.exit(message)
try:
with open(args.config, 'r') as f:
config_file_contents = f.read()
original_config_data = yaml.load(config_file_contents)
# Convert all keys to lower case.
config_data = collections.OrderedDict()
for k, v in original_config_data.items():
if isinstance(k, str):
k = k.lower()
config_data[k] = v
except YAMLError as e:
# Since the main logger gets its log file location from this file, we
# need to define a local logger to write to the default log file location,
# 'workbench.log'.
logging.basicConfig(
filename='workbench.log',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
message = 'Error: There appears to be a YAML syntax error with the configuration file "' + args.config + '". ' \
'\nIf you use an online YAML validator to find the error, *be sure to remove your Drupal hostname and user credentials first.*'
logging.exception(message)
sys.exit(message + "\n" + str(e))
config = {}
for k, v in config_data.items():
config[k] = v
# Set up defaults for some settings.
if 'input_dir' not in config:
config['input_dir'] = 'input_data'
if 'input_csv' not in config:
config['input_csv'] = 'metadata.csv'
if 'media_use_tid' not in config:
config['media_use_tid'] = 'http://pcdm.org/use#OriginalFile'
if 'drupal_filesystem' not in config:
config['drupal_filesystem'] = 'fedora://'
if 'id_field' not in config:
config['id_field'] = 'id'
if 'content_type' not in config:
config['content_type'] = 'islandora_object'
if 'delimiter' not in config:
config['delimiter'] = ','
if 'subdelimiter' not in config:
config['subdelimiter'] = '|'
if 'log_file_path' not in config:
config['log_file_path'] = 'workbench.log'
if 'log_file_mode' not in config:
config['log_file_mode'] = 'a'
if 'allow_missing_files' not in config:
config['allow_missing_files'] = False
if 'update_mode' not in config:
config['update_mode'] = 'replace'
if 'validate_title_length' not in config:
config['validate_title_length'] = True
if 'paged_content_from_directories' not in config:
config['paged_content_from_directories'] = False
if 'delete_media_with_nodes' not in config:
config['delete_media_with_nodes'] = True
if 'allow_adding_terms' not in config:
config['allow_adding_terms'] = False
if 'nodes_only' not in config:
config['nodes_only'] = False
if 'log_json' not in config:
config['log_json'] = False
if 'progress_bar' not in config:
config['progress_bar'] = False
if 'user_agent' not in config:
config['user_agent'] = 'Islandora Workbench'
if 'allow_redirects' not in config:
config['allow_redirects'] = True
if 'google_sheets_csv_filename' not in config:
config['google_sheets_csv_filename'] = 'google_sheet.csv'
if 'google_sheets_gid' not in config:
config['google_sheets_gid'] = '0'
if 'excel_worksheet' not in config:
config['excel_worksheet'] = 'Sheet1'
if 'excel_csv_filename' not in config:
config['excel_csv_filename'] = 'excel.csv'
if 'use_node_title_for_media' not in config:
config['use_node_title_for_media'] = False
if 'delete_tmp_upload' not in config:
config['delete_tmp_upload'] = False
if config['task'] == 'create':
if 'id_field' not in config:
config['id_field'] = 'id'
if config['task'] == 'create' or config['task'] == 'create_from_files':
if 'published' not in config:
config['published'] = 1
if config['task'] == 'create' or config['task'] == 'add_media' or config['task'] == 'create_from_files':
if 'preprocessors' in config_data:
config['preprocessors'] = {}
for preprocessor in config_data['preprocessors']:
for key, value in preprocessor.items():
config['preprocessors'][key] = value
if 'media_types' not in config:
config['media_types'] = []
image = collections.OrderedDict({'image': ['png', 'gif', 'jpg', 'jpeg']})
config['media_types'].append(image)
document = collections.OrderedDict({'document': ['pdf', 'doc', 'docx', 'ppt', 'pptx']})
config['media_types'].append(document)
file = collections.OrderedDict({'file': ['tif', 'tiff', 'jp2', 'zip', 'tar']})
config['media_types'].append(file)
audio = collections.OrderedDict({'audio': ['mp3', 'wav', 'aac']})
config['media_types'].append(audio)
video = collections.OrderedDict({'video': ['mp4']})
config['media_types'].append(video)
extracted_text = collections.OrderedDict({'extracted_text': ['txt']})
config['media_types'].append(extracted_text)
if config['task'] == 'create':
if 'paged_content_sequence_seprator' not in config:
config['paged_content_sequence_seprator'] = '-'
if 'paged_content_page_content_type' not in config:
config['paged_content_page_content_type'] = config['content_type']
if args.check:
config['check'] = True
else:
config['check'] = False
if args.get_csv_template:
config['get_csv_template'] = True
else:
config['get_csv_template'] = False
return config
def set_media_type(filepath, config):
"""Using configuration options, determine which media bundle type to use.
Options are either a single media type or a set of mappings from
file extension to media type.
"""
if 'media_type' in config:
return config['media_type']
extension_with_dot = os.path.splitext(filepath)[1]
extension = extension_with_dot[1:]
normalized_extension = extension.lower()
for types in config['media_types']:
for type, extensions in types.items():
if normalized_extension in extensions:
return type
# If extension isn't in one of the lists, default to 'file' bundle.
return 'file'
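# Illustrative behaviour of set_media_type(), assuming no single 'media_type' override is set
# in the config and the default 'media_types' mappings built in create_config() are in effect:
#   set_media_type('IMG_5083.JPG', config)    -> 'image'    (extension is lowercased first)
#   set_media_type('finding_aid.pdf', config) -> 'document'
#   set_media_type('dataset.xyz', config)     -> 'file'     (unknown extensions fall back to 'file')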
def set_model_from_extension(file_name, config):
"""Using configuration options, determine which Islandora Model value
to assign to nodes created from files. Options are either a single model
or a set of mappings from file extension to Islandora Model term ID.
"""
if config['task'] != 'create_from_files':
return None
if 'model' in config:
return config['model']
extension_with_dot = os.path.splitext(file_name)[1]
extension = extension_with_dot[1:]
normalized_extension = extension.lower()
for model_tids in config['models']:
for tid, extensions in model_tids.items():
if str(tid).startswith('http'):
tid = get_term_id_from_uri(config, tid)
if normalized_extension in extensions:
return tid
# If the file's extension is not listed in the config,
# We use the term ID that contains an empty extension.
if '' in extensions:
return tid
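# A sketch of a 'models' configuration as consumed by set_model_from_extension(); the term IDs,
# URI, and extensions below are hypothetical. Each entry maps an Islandora Models term (ID or
# external URI) to the file extensions that should receive it, and an entry containing '' acts
# as the default for unlisted extensions:
#   models:
#     - 23: ['tif', 'tiff', 'jp2']
#     - 'http://purl.org/coar/resource_type/c_c513': ['jpg', 'jpeg', 'png']
#     - 24: ['']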
def issue_request(
config,
method,
path,
headers=dict(),
json='',
data='',
query={}):
"""Issue the HTTP request to Drupal.
"""
if config['check'] is False:
if 'pause' in config and method in ['POST', 'PUT', 'PATCH', 'DELETE']:
time.sleep(config['pause'])
headers.update({'User-Agent': config['user_agent']})
config['host'] = config['host'].rstrip('/')
if config['host'] in path:
url = path
else:
url = config['host'] + path
if method == 'GET':
response = requests.get(
url,
allow_redirects=config['allow_redirects'],
auth=(config['username'], config['password']),
params=query,
headers=headers
)
if method == 'HEAD':
response = requests.head(
url,
allow_redirects=config['allow_redirects'],
auth=(config['username'], config['password']),
headers=headers
)
if method == 'POST':
if config['log_json'] is True:
logging.info(json)
response = requests.post(
url,
allow_redirects=config['allow_redirects'],
auth=(config['username'], config['password']),
headers=headers,
json=json,
data=data
)
if method == 'PUT':
if config['log_json'] is True:
logging.info(json)
response = requests.put(
url,
allow_redirects=config['allow_redirects'],
auth=(config['username'], config['password']),
headers=headers,
json=json,
data=data
)
if method == 'PATCH':
if config['log_json'] is True:
logging.info(json)
response = requests.patch(
url,
allow_redirects=config['allow_redirects'],
auth=(config['username'], config['password']),
headers=headers,
json=json,
data=data
)
if method == 'DELETE':
response = requests.delete(
url,
allow_redirects=config['allow_redirects'],
auth=(config['username'], config['password']),
headers=headers
)
return response
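# Typical usage of issue_request() elsewhere in this module (a sketch; the node ID is
# hypothetical). Paths may be passed relative to the configured host or as full URLs:
#   response = issue_request(config, 'GET', '/node/123?_format=json')
#   if response.status_code == 200:
#       node = json.loads(response.text)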
def ping_node(config, nid):
"""Ping the node to see if it exists.
"""
url = config['host'] + '/node/' + nid + '?_format=json'
response = issue_request(config, 'HEAD', url)
# @todo: Add 301 and 302 to the allowed status codes?
if response.status_code == 200:
return True
else:
logging.warning(
"Node ping (HEAD) on %s returned a %s status code",
url,
response.status_code)
return False
def ping_url_alias(config, url_alias):
"""Ping the URL alias to see if it exists. Return the status code.
"""
url = config['host'] + url_alias + '?_format=json'
response = issue_request(config, 'GET', url)
return response.status_code
def ping_islandora(config, print_message=True):
"""Connect to Islandora in prep for subsequent HTTP requests.
"""
# First, test a known request that requires Administrator-level permissions.
url = config['host'] + '/islandora_workbench_integration/upload_max_filesize'
try:
host_response = issue_request(config, 'GET', url)
except requests.exceptions.Timeout as err_timeout:
message = 'Workbench timed out trying to reach ' + \
config['host'] + '. Please verify the "host" setting in your configuration ' + \
'and check your network connection.'
logging.error(message)
logging.error(err_timeout)
sys.exit('Error: ' + message)
except requests.exceptions.ConnectionError as error_connection:
message = 'Workbench cannot connect to ' + \
config['host'] + '. Please verify the "host" setting in your configuration ' + \
'and check your network connection.'
logging.error(message)
logging.error(error_connection)
sys.exit('Error: ' + message)
if host_response.status_code == 404:
message = 'Workbench cannot detect whether the Islandora Workbench Integration module is ' + \
'enabled on ' + config['host'] + '. Please ensure it is enabled.'
logging.error(message)
sys.exit('Error: ' + message)
not_authorized = [401, 403]
if host_response.status_code in not_authorized:
message = 'Workbench can connect to ' + \
config['host'] + ' but the user "' + config['username'] + \
'" does not have sufficient permissions to continue, or the credentials are invalid.'
logging.error(message)
sys.exit('Error: ' + message)
message = "OK, connection to Drupal at " + config['host'] + " verified."
if print_message is True:
logging.info(message)
print(message)
def ping_remote_file(url):
'''Logging, exiting, etc. happens in caller, except on requests error.
'''
sections = urllib.parse.urlparse(url)
try:
response = requests.head(url, allow_redirects=True)
return response.status_code
except requests.exceptions.Timeout as err_timeout:
message = 'Workbench timed out trying to reach ' + \
sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
logging.error(message)
logging.error(err_timeout)
sys.exit('Error: ' + message)
except requests.exceptions.ConnectionError as error_connection:
message = 'Workbench cannot connect to ' + \
sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
logging.error(message)
logging.error(error_connection)
sys.exit('Error: ' + message)
def get_field_definitions(config):
"""Get field definitions from Drupal.
"""
ping_islandora(config, print_message=False)
# For media, entity_type will need to be 'media' and bundle_type will
# need to be one of 'image', 'document', 'audio', 'video', 'file'
entity_type = 'node'
bundle_type = config['content_type']
field_definitions = {}
fields = get_entity_fields(config, entity_type, bundle_type)
for fieldname in fields:
field_definitions[fieldname] = {}
raw_field_config = get_entity_field_config(config, fieldname, entity_type, bundle_type)
field_config = json.loads(raw_field_config)
field_definitions[fieldname]['entity_type'] = field_config['entity_type']
field_definitions[fieldname]['required'] = field_config['required']
field_definitions[fieldname]['label'] = field_config['label']
raw_vocabularies = [x for x in field_config['dependencies']['config'] if re.match("^taxonomy.vocabulary.", x)]
if len(raw_vocabularies) > 0:
vocabularies = [x.replace("taxonomy.vocabulary.", '')
for x in raw_vocabularies]
field_definitions[fieldname]['vocabularies'] = vocabularies
if entity_type == 'media' and 'file_extensions' in field_config['settings']:
field_definitions[fieldname]['file_extensions'] = field_config['settings']['file_extensions']
if entity_type == 'media':
field_definitions[fieldname]['media_type'] = bundle_type
raw_field_storage = get_entity_field_storage(config, fieldname, entity_type)
field_storage = json.loads(raw_field_storage)
field_definitions[fieldname]['field_type'] = field_storage['type']
field_definitions[fieldname]['cardinality'] = field_storage['cardinality']
if 'max_length' in field_storage['settings']:
field_definitions[fieldname]['max_length'] = field_storage['settings']['max_length']
else:
field_definitions[fieldname]['max_length'] = None
if 'target_type' in field_storage['settings']:
field_definitions[fieldname]['target_type'] = field_storage['settings']['target_type']
else:
field_definitions[fieldname]['target_type'] = None
if field_storage['type'] == 'typed_relation' and 'rel_types' in field_config['settings']:
field_definitions[fieldname]['typed_relations'] = field_config['settings']['rel_types']
field_definitions['title'] = {'entity_type': 'node', 'required': True, 'label': 'Title', 'field_type': 'string', 'cardinality': 1, 'max_length': 255, 'target_type': None}
return field_definitions
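# A sketch of the structure returned by get_field_definitions(); field names and values below
# are illustrative only. Keys such as 'vocabularies' and 'typed_relations' appear only when the
# field's configuration provides them:
#   {
#     'title': {'entity_type': 'node', 'required': True, 'label': 'Title', 'field_type': 'string',
#               'cardinality': 1, 'max_length': 255, 'target_type': None},
#     'field_member_of': {'entity_type': 'node', 'required': False, 'label': 'Member Of',
#                         'field_type': 'entity_reference', 'cardinality': -1,
#                         'max_length': None, 'target_type': 'node'},
#   }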
def get_entity_fields(config, entity_type, bundle_type):
"""Get all the fields configured on a bundle.
"""
fields_endpoint = config['host'] + '/entity/entity_form_display/' + \
entity_type + '.' + bundle_type + '.default?_format=json'
bundle_type_response = issue_request(config, 'GET', fields_endpoint)
fields = []
if bundle_type_response.status_code == 200:
node_config_raw = json.loads(bundle_type_response.text)
fieldname_prefix = 'field.field.node.' + bundle_type + '.'
fieldnames = [
field_dependency.replace(
fieldname_prefix,
'') for field_dependency in node_config_raw['dependencies']['config']]
for fieldname in node_config_raw['dependencies']['config']:
fieldname_prefix = 'field.field.' + entity_type + '.' + bundle_type + '.'
if re.match(fieldname_prefix, fieldname):
fieldname = fieldname.replace(fieldname_prefix, '')
fields.append(fieldname)
else:
message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
logging.error(message)
sys.exit('Error: ' + message)
return fields
def get_entity_field_config(config, fieldname, entity_type, bundle_type):
"""Get a specific fields's configuration.
"""
field_config_endpoint = config['host'] + '/entity/field_config/' + \
entity_type + '.' + bundle_type + '.' + fieldname + '?_format=json'
field_config_response = issue_request(config, 'GET', field_config_endpoint)
if field_config_response.status_code == 200:
return field_config_response.text
else:
message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
logging.error(message)
sys.exit('Error: ' + message)
def get_entity_field_storage(config, fieldname, entity_type):
"""Get a specific fields's storage configuration.
"""
field_storage_endpoint = config['host'] + '/entity/field_storage_config/' + \
entity_type + '.' + fieldname + '?_format=json'
field_storage_response = issue_request(
config, 'GET', field_storage_endpoint)
if field_storage_response.status_code == 200:
return field_storage_response.text
else:
message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
logging.error(message)
sys.exit('Error: ' + message)
def check_input(config, args):
"""Validate the config file and input data.
"""
logging.info(
'Starting configuration check for "%s" task using config file %s.',
config['task'],
args.config)
ping_islandora(config, print_message=False)
base_fields = ['title', 'status', 'promote', 'sticky', 'uid', 'created']
# Check the config file.
tasks = [
'create',
'update',
'delete',
'add_media',
'delete_media',
'create_from_files']
joiner = ', '
if config['task'] not in tasks:
message = '"task" in your configuration file must be one of "create", "update", "delete", "add_media", or "create_from_files".'
logging.error(message)
sys.exit('Error: ' + message)
config_keys = list(config.keys())
config_keys.remove('check')
# Check for presence of required config keys, which varies by task.
if config['task'] == 'create':
if config['nodes_only'] is True:
message = '"nodes_only" option in effect. Media files will not be checked/validated.'
print(message)
logging.info(message)
create_required_options = [
'task',
'host',
'username',
'password']
for create_required_option in create_required_options:
if create_required_option not in config_keys:
message = 'Please check your config file for required values: ' \
+ joiner.join(create_required_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'update':
update_required_options = [
'task',
'host',
'username',
'password']
for update_required_option in update_required_options:
if update_required_option not in config_keys:
message = 'Please check your config file for required values: ' \
+ joiner.join(update_required_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
update_mode_options = ['replace', 'append', 'delete']
if config['update_mode'] not in update_mode_options:
message = 'Your "update_mode" config option must be one of the following: ' \
+ joiner.join(update_mode_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete':
delete_required_options = [
'task',
'host',
'username',
'password']
for delete_required_option in delete_required_options:
if delete_required_option not in config_keys:
message = 'Please check your config file for required values: ' \
+ joiner.join(delete_required_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'add_media':
add_media_required_options = [
'task',
'host',
'username',
'password']
for add_media_required_option in add_media_required_options:
if add_media_required_option not in config_keys:
message = 'Please check your config file for required values: ' \
+ joiner.join(add_media_required_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete_media':
delete_media_required_options = [
'task',
'host',
'username',
'password']
for delete_media_required_option in delete_media_required_options:
if delete_media_required_option not in config_keys:
message = 'Please check your config file for required values: ' \
+ joiner.join(delete_media_required_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, configuration file has all required values (did not check for optional values).'
print(message)
logging.info(message)
# Check existence of CSV file.
if os.path.isabs(config['input_csv']):
input_csv = config['input_csv']
# The actual "extraction" is fired over in workbench.
elif config['input_csv'].startswith('http'):
input_csv = os.path.join(config['input_dir'], config['google_sheets_csv_filename'])
message = "Extracting CSV data from " + config['input_csv'] + " (worksheet gid " + str(config['google_sheets_gid']) + ") to " + input_csv + '.'
print(message)
logging.info(message)
elif config['input_csv'].endswith('xlsx'):
input_csv = os.path.join(config['input_dir'], config['excel_csv_filename'])
message = "Extracting CSV data from " + config['input_csv'] + " to " + input_csv + '.'
print(message)
logging.info(message)
else:
input_csv = os.path.join(config['input_dir'], config['input_csv'])
if os.path.exists(input_csv):
message = 'OK, CSV file ' + input_csv + ' found.'
print(message)
logging.info(message)
else:
message = 'CSV file ' + input_csv + ' not found.'
logging.error(message)
sys.exit('Error: ' + message)
# Check column headers in CSV file.
csv_data = get_csv_data(config)
csv_column_headers = csv_data.fieldnames
# Check whether each row contains the same number of columns as there are headers.
for count, row in enumerate(csv_data, start=1):
string_field_count = 0
for field in row:
if (row[field] is not None):
string_field_count += 1
if len(csv_column_headers) > string_field_count:
logging.error("Row %s of your CSV file does not " +
"have same number of columns (%s) as there are headers " +
"(%s).", str(count), str(string_field_count), str(len(csv_column_headers)))
sys.exit("Error: Row " +
str(count) +
" of your CSV file " +
"does not have same number of columns (" +
str(string_field_count) +
") as there are headers (" +
str(len(csv_column_headers)) +
").")
if len(csv_column_headers) < string_field_count:
logging.error("Row %s of your CSV file has more columns (%s) than there are headers " +
"(%s).", str(count), str(string_field_count), str(len(csv_column_headers)))
sys.exit("Error: Row " +
str(count) +
" of your CSV file " +
"has more columns (" + str(string_field_count) + ") than there are headers (" +
str(len(csv_column_headers)) +
").")
message = "OK, all " \
+ str(count) + " rows in the CSV file have the same number of columns as there are headers (" \
+ str(len(csv_column_headers)) + ")."
print(message)
logging.info(message)
# Task-specific CSV checks.
langcode_was_present = False
if config['task'] == 'create':
field_definitions = get_field_definitions(config)
if config['id_field'] not in csv_column_headers:
message = 'For "create" tasks, your CSV file must have a column containing a unique identifier.'
logging.error(message)
sys.exit('Error: ' + message)
if config['nodes_only'] is False and 'file' not in csv_column_headers and config['paged_content_from_directories'] is False:
message = 'For "create" tasks, your CSV file must contain a "file" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'title' not in csv_column_headers:
message = 'For "create" tasks, your CSV file must contain a "title" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'output_csv' in config.keys():
if os.path.exists(config['output_csv']):
message = 'Output CSV already exists at ' + \
config['output_csv'] + ', records will be appended to it.'
print(message)
logging.info(message)
if 'url_alias' in csv_column_headers:
validate_url_aliases_csv_data = get_csv_data(config)
validate_url_aliases(config, validate_url_aliases_csv_data)
# Specific to creating paged content. Currently, if 'parent_id' is present
# in the CSV file, 'field_weight' and 'field_member_of' must be present as well.
if 'parent_id' in csv_column_headers:
if ('field_weight' not in csv_column_headers or 'field_member_of' not in csv_column_headers):
message = 'If your CSV file contains a "parent_id" column, it must also contain "field_weight" and "field_member_of" columns.'
logging.error(message)
sys.exit('Error: ' + message)
drupal_fieldnames = []
for drupal_fieldname in field_definitions:
drupal_fieldnames.append(drupal_fieldname)
if len(drupal_fieldnames) == 0:
message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
logging.error(message)
sys.exit('Error: ' + message)
# We .remove() CSV column headers for this check because they are not Drupal field names (including 'langcode').
# Any new columns introduced into the CSV need to be removed here.
if config['id_field'] in csv_column_headers:
csv_column_headers.remove(config['id_field'])
if 'file' in csv_column_headers:
csv_column_headers.remove('file')
if 'node_id' in csv_column_headers:
csv_column_headers.remove('node_id')
if 'parent_id' in csv_column_headers:
csv_column_headers.remove('parent_id')
if 'image_alt_text' in csv_column_headers:
csv_column_headers.remove('image_alt_text')
if 'url_alias' in csv_column_headers:
csv_column_headers.remove('url_alias')
# langcode is a standard Drupal field but it doesn't show up in any field configs.
if 'langcode' in csv_column_headers:
csv_column_headers.remove('langcode')
# Set this so we can validate langcode below.
langcode_was_present = True
for csv_column_header in csv_column_headers:
if csv_column_header not in drupal_fieldnames and csv_column_header not in base_fields:
logging.error(
"CSV column header %s does not match any Drupal field names.",
csv_column_header)
sys.exit(
'Error: CSV column header "' +
csv_column_header +
'" does not match any Drupal field names.')
message = 'OK, CSV column headers match Drupal field names.'
print(message)
logging.info(message)
# Check that Drupal fields that are required are in the CSV file (create task only).
if config['task'] == 'create':
required_drupal_fields = []
for drupal_fieldname in field_definitions:
# In the create task, we only check for required fields that apply to nodes.
if 'entity_type' in field_definitions[drupal_fieldname] and field_definitions[
drupal_fieldname]['entity_type'] == 'node':
if 'required' in field_definitions[drupal_fieldname] and field_definitions[
drupal_fieldname]['required'] is True:
required_drupal_fields.append(drupal_fieldname)
for required_drupal_field in required_drupal_fields:
if required_drupal_field not in csv_column_headers:
logging.error(
"Required Drupal field %s is not present in the CSV file.",
required_drupal_field)
sys.exit(
'Error: Field "' +
required_drupal_field +
'" required for content type "' +
config['content_type'] +
'" is not present in the CSV file.')
message = 'OK, required Drupal fields are present in the CSV file.'
print(message)
logging.info(message)
# Validate dates in 'created' field, if present.
if 'created' in csv_column_headers:
validate_node_created_csv_data = get_csv_data(config)
validate_node_created_date(validate_node_created_csv_data)
# Validate user IDs in 'uid' field, if present.
if 'uid' in csv_column_headers:
validate_node_uid_csv_data = get_csv_data(config)
validate_node_uid(config, validate_node_uid_csv_data)
if config['task'] == 'update':
if 'node_id' not in csv_column_headers:
message = 'For "update" tasks, your CSV file must contain a "node_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'url_alias' in csv_column_headers:
validate_url_aliases_csv_data = get_csv_data(config)
validate_url_aliases(config, validate_url_aliases_csv_data)
field_definitions = get_field_definitions(config)
drupal_fieldnames = []
for drupal_fieldname in field_definitions:
drupal_fieldnames.append(drupal_fieldname)
if 'title' in csv_column_headers:
csv_column_headers.remove('title')
if 'url_alias' in csv_column_headers:
csv_column_headers.remove('url_alias')
if 'image_alt_text' in csv_column_headers:
csv_column_headers.remove('image_alt_text')
if 'file' in csv_column_headers:
message = 'Error: CSV column header "file" is not allowed in update tasks.'
logging.error(message)
sys.exit(message)
if 'node_id' in csv_column_headers:
csv_column_headers.remove('node_id')
for csv_column_header in csv_column_headers:
if csv_column_header not in drupal_fieldnames:
logging.error(
'CSV column header %s does not match any Drupal field names.',
csv_column_header)
sys.exit(
'Error: CSV column header "' +
csv_column_header +
'" does not match any Drupal field names.')
message = 'OK, CSV column headers match Drupal field names.'
print(message)
logging.info(message)
if config['task'] == 'add_media' or config['task'] == 'create' and config['nodes_only'] is False:
validate_media_use_tid(config)
if config['task'] == 'update' or config['task'] == 'create':
validate_geolocation_values_csv_data = get_csv_data(config)
validate_geolocation_fields(config, field_definitions, validate_geolocation_values_csv_data)
validate_link_values_csv_data = get_csv_data(config)
validate_link_fields(config, field_definitions, validate_link_values_csv_data)
validate_edtf_values_csv_data = get_csv_data(config)
validate_edtf_fields(config, field_definitions, validate_edtf_values_csv_data)
validate_csv_field_cardinality_csv_data = get_csv_data(config)
validate_csv_field_cardinality(config, field_definitions, validate_csv_field_cardinality_csv_data)
validate_csv_field_length_csv_data = get_csv_data(config)
validate_csv_field_length(config, field_definitions, validate_csv_field_length_csv_data)
# Validating values in CSV taxonomy fields requires a View installed by the Islandora Workbench Integration module.
# If the View is not enabled, Drupal returns a 404. Use a dummy vocabulary ID or we'll get a 404 even if the View
# is enabled.
terms_view_url = config['host'] + '/vocabulary/dummyvid?_format=json'
terms_view_response = issue_request(config, 'GET', terms_view_url)
if terms_view_response.status_code == 404:
logging.warning(
'Not validating taxonomy term IDs used in CSV file. To use this feature, install the Islandora Workbench Integration module.')
print('Warning: Not validating taxonomy term IDs used in CSV file. To use this feature, install the Islandora Workbench Integration module.')
else:
validate_taxonomy_field_csv_data = get_csv_data(config)
warn_user_about_taxo_terms = validate_taxonomy_field_values(config, field_definitions, validate_taxonomy_field_csv_data)
if warn_user_about_taxo_terms is True:
print('Warning: Issues detected with validating taxonomy field values in the CSV file. See the log for more detail.')
validate_csv_typed_relation_values_csv_data = get_csv_data(config)
warn_user_about_typed_relation_terms = validate_typed_relation_field_values(config, field_definitions, validate_csv_typed_relation_values_csv_data)
if warn_user_about_typed_relation_terms is True:
print('Warning: Issues detected with validating typed relation field values in the CSV file. See the log for more detail.')
# Validate length of 'title'.
if config['validate_title_length']:
validate_title_csv_data = get_csv_data(config)
for count, row in enumerate(validate_title_csv_data, start=1):
if 'title' in row and len(row['title']) > 255:
message = "The 'title' column in row " + str(count) + " of your CSV file exceeds Drupal's maximum length of 255 characters."
logging.error(message)
sys.exit('Error: ' + message)
# Validate existence of nodes specified in 'field_member_of'. This could be generalized out to validate node IDs in other fields.
# See https://github.com/mjordan/islandora_workbench/issues/90.
validate_field_member_of_csv_data = get_csv_data(config)
for count, row in enumerate(
validate_field_member_of_csv_data, start=1):
if 'field_member_of' in csv_column_headers:
parent_nids = row['field_member_of'].split(
config['subdelimiter'])
for parent_nid in parent_nids:
if len(parent_nid) > 0:
parent_node_exists = ping_node(config, parent_nid)
if parent_node_exists is False:
message = "The 'field_member_of' field in row " + \
str(count) + " of your CSV file contains a node ID (" + parent_nid + ") that doesn't exist."
logging.error(message)
sys.exit('Error: ' + message)
# Validate 'langcode' values if that field exists in the CSV.
if langcode_was_present:
validate_langcode_csv_data = get_csv_data(config)
for count, row in enumerate(validate_langcode_csv_data, start=1):
langcode_valid = validate_language_code(row['langcode'])
if not langcode_valid:
message = "Row " + \
str(count) + " of your CSV file contains an invalid Drupal language code (" + row['langcode'] + ") in its 'langcode' column."
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete':
if 'node_id' not in csv_column_headers:
message = 'For "delete" tasks, your CSV file must contain a "node_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'add_media':
if 'node_id' not in csv_column_headers:
message = 'For "add_media" tasks, your CSV file must contain a "node_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'file' not in csv_column_headers:
message = 'For "add_media" tasks, your CSV file must contain a "file" column.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete_media':
if 'media_id' not in csv_column_headers:
message = 'For "delete_media" tasks, your CSV file must contain a "media_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
# Check for existence of files listed in the 'file' column.
if (config['task'] == 'create' or config['task'] == 'add_media') and config['paged_content_from_directories'] is False:
file_check_csv_data = get_csv_data(config)
if config['nodes_only'] is False and config['allow_missing_files'] is False:
for count, file_check_row in enumerate(file_check_csv_data, start=1):
if len(file_check_row['file']) == 0:
message = 'Row ' + file_check_row[config['id_field']] + ' contains an empty "file" value.'
logging.error(message)
sys.exit('Error: ' + message)
file_check_row['file'] = file_check_row['file'].strip()
if file_check_row['file'].startswith('http'):
http_response_code = ping_remote_file(file_check_row['file'])
if http_response_code != 200:
message = 'Remote file ' + file_check_row['file'] + ' identified in CSV "file" column for record with ID field value ' \
+ file_check_row[config['id_field']] + ' not found or not accessible (HTTP response code ' + str(http_response_code) + ').'
logging.error(message)
sys.exit('Error: ' + message)
if os.path.isabs(file_check_row['file']):
file_path = file_check_row['file']
else:
file_path = os.path.join(config['input_dir'], file_check_row['file'])
if not file_check_row['file'].startswith('http'):
if not os.path.exists(file_path) or not os.path.isfile(file_path):
message = 'File ' + file_path + ' identified in CSV "file" column for record with ID field value ' \
+ file_check_row[config['id_field']] + ' not found.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, files named in the CSV "file" column are all present.'
print(message)
logging.info(message)
empty_file_values_exist = False
if config['nodes_only'] is False and config['allow_missing_files'] is True:
for count, file_check_row in enumerate(
file_check_csv_data, start=1):
if len(file_check_row['file']) == 0:
empty_file_values_exist = True
else:
file_path = os.path.join(
config['input_dir'], file_check_row['file'])
if not os.path.exists(
file_path) or not os.path.isfile(file_path):
message = 'File ' + file_path + ' identified in CSV "file" column not found.'
logging.error(message)
sys.exit('Error: ' + message)
if empty_file_values_exist is True:
message = 'OK, files named in the CSV "file" column are all present; the "allow_missing_files" option is enabled and empty "file" values exist.'
print(message)
logging.info(message)
else:
message = 'OK, files named in the CSV "file" column are all present.'
print(message)
logging.info(message)
# @todo: check that each file's extension is allowed for the current media type using get_registered_media_extensions().
# See https://github.com/mjordan/islandora_workbench/issues/126. Maybe also compare allowed extensions with those in
# 'media_type[s]' config option?
if config['task'] == 'create' and config['paged_content_from_directories'] is True:
if 'paged_content_page_model_tid' not in config:
message = 'If you are creating paged content, you must include "paged_content_page_model_tid" in your configuration.'
logging.error(
'Configuration requires "paged_content_page_model_tid" setting when creating paged content.')
sys.exit('Error: ' + message)
paged_content_from_directories_csv_data = get_csv_data(config)
for count, file_check_row in enumerate(
paged_content_from_directories_csv_data, start=1):
dir_path = os.path.join(
config['input_dir'], file_check_row[config['id_field']])
if not os.path.exists(dir_path) or os.path.isfile(dir_path):
message = 'Page directory ' + dir_path + ' for CSV record with ID "' \
+ file_check_row[config['id_field']] + '"" not found.'
logging.error(message)
sys.exit('Error: ' + message)
page_files = os.listdir(dir_path)
if len(page_files) == 0:
print(
'Warning: Page directory ' +
dir_path +
' is empty; is that intentional?')
logging.warning('Page directory ' + dir_path + ' is empty.')
for page_file_name in page_files:
if config['paged_content_sequence_seprator'] not in page_file_name:
message = 'Page file ' + os.path.join(
dir_path,
page_file_name) + ' does not contain a sequence separator (' + config['paged_content_sequence_seprator'] + ').'
logging.error(message)
sys.exit('Error: ' + message)
print('OK, page directories are all present.')
# If nothing has failed by now, exit with a positive, upbeat message.
print("Configuration and input data appear to be valid.")
logging.info('Configuration checked for "%s" task using config file %s, no problems found.', config['task'], args.config)
sys.exit(0)
def get_registered_media_extensions(field_definitions):
# Unfinished. See https://github.com/mjordan/islandora_workbench/issues/126.
for field_name, field_def in field_definitions.items():
print("Field name: " + field_name + ' / ' + str(field_def))
"""
print(field_def)
if field_def['entity_type'] == 'media':
if 'file_extensions' in field_def:
print('Allowed file extensions for ' + field_def['media_type'] + ' :' + field_def['file_extensions'])
else:
print("No file extensions for " + field_def['media_type'])
"""
def check_input_for_create_from_files(config, args):
"""Validate the config file and input data if task is 'create_from_files'.
"""
if config['task'] != 'create_from_files':
message = 'Your task must be "create_from_files".'
logging.error(message)
sys.exit('Error: ' + message)
logging.info('Starting configuration check for "%s" task using config file %s.', config['task'], args.config)
ping_islandora(config, print_message=False)
config_keys = list(config.keys())
unwanted_in_create_from_files = [
'check',
'delimiter',
'subdelimiter',
'allow_missing_files',
'validate_title_length',
'paged_content_from_directories',
'delete_media_with_nodes',
'allow_adding_terms']
for option in unwanted_in_create_from_files:
if option in config_keys:
config_keys.remove(option)
# Check for presence of required config keys.
create_required_options = [
'task',
'host',
'username',
'password']
for create_required_option in create_required_options:
if create_required_option not in config_keys:
message = 'Please check your config file for required values: ' \
+ ', '.join(create_required_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
# Check existence of input directory.
if os.path.exists(config['input_dir']):
message = 'OK, input directory "' + config['input_dir'] + '" found.'
print(message)
logging.info(message)
else:
message = 'Input directory "' + config['input_dir'] + '"" not found.'
logging.error(message)
sys.exit('Error: ' + message)
# Validate length of 'title'.
files = os.listdir(config['input_dir'])
for file_name in files:
filename_without_extension = os.path.splitext(file_name)[0]
if len(filename_without_extension) > 255:
message = 'The filename "' + filename_without_extension + \
'" exceeds Drupal\'s maximum length of 255 characters and cannot be used for a node title.'
logging.error(message)
sys.exit('Error: ' + message)
# Check that either 'model' or 'models' are present in the config file.
if ('model' not in config and 'models' not in config):
message = 'You must include either the "model" or "models" option in your configuration.'
logging.error(message)
sys.exit('Error: ' + message)
# If nothing has failed by now, exit with a positive message.
print("Configuration and input data appear to be valid.")
logging.info(
'Configuration checked for "%s" task using config file %s, no problems found.',
config['task'],
args.config)
sys.exit(0)
def log_field_cardinality_violation(field_name, record_id, cardinality):
"""Writes an entry to the log during create/update tasks if any field values
are sliced off. Workbench does this if the number of values in a field
exceeds the field's cardinality. record_id could be a value from the
configured id_field or a node ID.
"""
logging.warning(
"Adding all values in CSV field %s for record %s would exceed maximum " +
"number of allowed values (%s), so only adding first value.",
field_name,
record_id,
cardinality)
def validate_language_code(langcode):
# Drupal's language codes.
codes = ['af', 'am', 'ar', 'ast', 'az', 'be', 'bg', 'bn', 'bo', 'bs',
'ca', 'cs', 'cy', 'da', 'de', 'dz', 'el', 'en', 'en-x-simple', 'eo',
'es', 'et', 'eu', 'fa', 'fi', 'fil', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl',
'gsw-berne', 'gu', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'is', 'it',
'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'lo', 'lt', 'lv',
'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'nb', 'nn', 'oc',
'pa', 'pl', 'pt-pt', 'pt-br', 'ro', 'ru', 'sco', 'se', 'si', 'sk', 'sl',
'sq', 'sr', 'sv', 'sw', 'ta', 'ta-lk', 'te', 'th', 'tr', 'tyv', 'ug',
'uk', 'ur', 'vi', 'xx-lolspeak', 'zh-hans', 'zh-hant']
if langcode in codes:
return True
else:
return False
def clean_csv_values(row):
"""Strip leading and trailing whitespace from row values. Could be used in the
future for other normalization tasks.
"""
for field in row:
if isinstance(row[field], str):
row[field] = row[field].strip()
return row
def truncate_csv_value(field_name, record_id, field_config, value):
"""Drupal will not accept field values that have a length that
exceeds the configured maximum length for that field. 'value'
here is a field subvalue.
"""
if isinstance(value, str) and 'max_length' in field_config:
max_length = field_config['max_length']
if max_length is not None and len(value) > int(max_length):
original_value = value
value = value[:max_length]
logging.warning(
'CSV field value "%s" in field "%s" (record ID %s) truncated at %s characters as required by the field\'s configuration.',
original_value,
field_name,
record_id,
max_length)
return value
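# Illustrative call to truncate_csv_value(), using a hypothetical field configuration with a
# 10-character limit; values in fields without a configured 'max_length' pass through unchanged:
#   truncate_csv_value('field_identifier', '0001', {'max_length': 10}, 'abcdefghijklmn')
#   -> 'abcdefghij' (and a warning is written to the log)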
def get_node_field_values(config, nid):
"""Get a node's field data so we can use it during PATCH updates,
which replace a field's values.
"""
node_url = config['host'] + '/node/' + nid + '?_format=json'
response = issue_request(config, 'GET', node_url)
node_fields = json.loads(response.text)
return node_fields
def get_target_ids(node_field_values):
"""Get the target IDs of all entities in a field.
"""
target_ids = []
for target in node_field_values:
target_ids.append(target['target_id'])
return target_ids
def split_typed_relation_string(config, typed_relation_string, target_type):
"""Fields of type 'typed_relation' are represented in the CSV file
using a structured string, specifically namespace:property:id,
e.g., 'relators:pht:5'. 'id' is either a term ID or a node ID. This
function takes one of those strings (optionally with a multivalue
subdelimiter) and returns a list of dictionaries in the form they
take in existing node values.
Also, these values can (but don't need to) have an optional namespace
in the term ID segment, which is the vocabulary ID string. These
typed relation strings look like 'relators:pht:person:Jordan, Mark'.
However, since we split the typed relation strings only on the first
two :, we don't need to worry about what's in the third segment.
"""
return_list = []
temp_list = typed_relation_string.split(config['subdelimiter'])
for item in temp_list:
item_list = item.split(':', 2)
if value_is_numeric(item_list[2]):
target_id = int(item_list[2])
else:
target_id = item_list[2]
item_dict = {
'target_id': target_id,
'rel_type': item_list[0] + ':' + item_list[1],
'target_type': target_type}
return_list.append(item_dict)
return return_list
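# Illustrative input/output for split_typed_relation_string(), assuming the default '|'
# subdelimiter (the term ID and names are hypothetical):
#   split_typed_relation_string(config, 'relators:art:30|relators:pht:Jordan, Mark', 'taxonomy_term')
#   -> [{'target_id': 30, 'rel_type': 'relators:art', 'target_type': 'taxonomy_term'},
#       {'target_id': 'Jordan, Mark', 'rel_type': 'relators:pht', 'target_type': 'taxonomy_term'}]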
def split_geolocation_string(config, geolocation_string):
"""Fields of type 'geolocation' are represented in the CSV file using a
structured string, specifically lat,lng, e.g. "49.16667, -123.93333"
or "+49.16667, -123.93333". This function takes one of those strings
(optionally with a multivalue subdelimiter) and returns a list of
dictionaries with 'lat' and 'lng' keys required by the 'geolocation'
field type.
"""
return_list = []
temp_list = geolocation_string.split(config['subdelimiter'])
for item in temp_list:
item_list = item.split(',')
# Remove any leading \ which might be in value if it comes from a spreadsheet.
item_dict = {'lat': item_list[0].lstrip('\\').strip(), 'lng': item_list[1].lstrip('\\').strip()}
return_list.append(item_dict)
return return_list
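# Illustrative input/output for split_geolocation_string(), assuming the default '|' subdelimiter:
#   split_geolocation_string(config, '49.16667,-123.93333|+49.25,-123.1')
#   -> [{'lat': '49.16667', 'lng': '-123.93333'}, {'lat': '+49.25', 'lng': '-123.1'}]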
def split_link_string(config, link_string):
"""Fields of type 'link' are represented in the CSV file using a structured string,
specifically uri%%title, e.g. "https://www.lib.sfu.ca%%SFU Library Website".
This function takes one of those strings (optionally with a multivalue subdelimiter)
and returns a list of dictionaries with 'uri' and 'title' keys required by the
'link' field type.
"""
return_list = []
temp_list = link_string.split(config['subdelimiter'])
for item in temp_list:
if '%%' in item:
item_list = item.split('%%')
item_dict = {'uri': item_list[0].strip(), 'title': item_list[1].strip()}
return_list.append(item_dict)
else:
# If there is no %% separator (and therefore no title), use the URI as the title.
item_dict = {'uri': item.strip(), 'title': item.strip()}
return_list.append(item_dict)
return return_list
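# Illustrative input/output for split_link_string(), assuming the default '|' subdelimiter:
#   split_link_string(config, 'https://www.lib.sfu.ca%%SFU Library Website|https://example.com')
#   -> [{'uri': 'https://www.lib.sfu.ca', 'title': 'SFU Library Website'},
#       {'uri': 'https://example.com', 'title': 'https://example.com'}]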
def validate_media_use_tid(config):
"""Validate whether the term ID or URI provided in the config value for media_use_tid is
in the Islandora Media Use vocabulary.
"""
if value_is_numeric(config['media_use_tid']) is not True and config['media_use_tid'].startswith('http'):
media_use_tid = get_term_id_from_uri(config, config['media_use_tid'])
if media_use_tid is False:
message = 'URI "' + \
config['media_use_tid'] + '" provided in configuration option "media_use_tid" does not match any taxonomy terms.'
logging.error(message)
sys.exit('Error: ' + message)
else:
# Confirm the tid exists and is in the islandora_media_use vocabulary
term_endpoint = config['host'] + '/taxonomy/term/' \
+ str(config['media_use_tid']) + '?_format=json'
headers = {'Content-Type': 'application/json'}
response = issue_request(config, 'GET', term_endpoint, headers)
if response.status_code == 404:
message = 'Term ID "' + \
str(config['media_use_tid']) + '" used in the "media_use_tid" configuration option is not a term ID (term doesn\'t exist).'
logging.error(message)
sys.exit('Error: ' + message)
if response.status_code == 200:
response_body = json.loads(response.text)
if 'vid' in response_body:
if response_body['vid'][0]['target_id'] != 'islandora_media_use':
message = 'Term ID "' + \
str(config['media_use_tid']) + '" provided in configuration option "media_use_tid" is not in the Islandora Media Use vocabulary.'
logging.error(message)
sys.exit('Error: ' + message)
def preprocess_field_data(subdelimiter, field_value, path_to_script):
"""Executes a field preprocessor script and returns its output and exit status code. The script
is passed the field subdelimiter as defined in the config YAML and the field's value, and
prints a modified version of the value (result) back to this function.
"""
cmd = subprocess.Popen(
[path_to_script, subdelimiter, field_value], stdout=subprocess.PIPE)
result, stderrdata = cmd.communicate()
return result, cmd.returncode
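# The preprocessor contract, sketched: the script named in the config receives the subdelimiter
# as its first argument and the raw field value as its second, and prints the transformed value
# to STDOUT. A minimal (hypothetical) preprocessor could look like this:
#   #!/usr/bin/env python3
#   import sys
#   print(sys.argv[2].strip().upper())
# Note that communicate() returns bytes here, so 'result' will need decoding before it is used
# as a field value.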
def execute_bootstrap_script(path_to_script, path_to_config_file):
"""Executes a bootstrap script and returns its output and exit status code.
@todo: pass config into script.
"""
cmd = subprocess.Popen(
[path_to_script, path_to_config_file], stdout=subprocess.PIPE)
result, stderrdata = cmd.communicate()
return result, cmd.returncode
def create_media(config, filename, node_uri, node_csv_row):
"""node_csv_row is an OrderedDict, e.g.
OrderedDict([('file', 'IMG_5083.JPG'), ('id', '05'), ('title', 'Alcatraz Island').
"""
if config['nodes_only'] is True:
return
is_remote = False
filename = filename.strip()
if filename.startswith('http'):
file_path = download_remote_file(config, filename, node_csv_row)
filename = file_path.split("/")[-1]
is_remote = True
elif os.path.isabs(filename):
file_path = filename
else:
file_path = os.path.join(config['input_dir'], filename)
mimetype = mimetypes.guess_type(file_path)
media_type = set_media_type(filename, config)
if value_is_numeric(config['media_use_tid']):
media_use_tid = config['media_use_tid']
if not value_is_numeric(config['media_use_tid']) and config['media_use_tid'].startswith('http'):
media_use_tid = get_term_id_from_uri(config, config['media_use_tid'])
media_endpoint_path = '/media/' + media_type + '/' + str(media_use_tid)
media_endpoint = node_uri + media_endpoint_path
location = config['drupal_filesystem'] + os.path.basename(filename)
media_headers = {
'Content-Type': mimetype[0],
'Content-Location': location
}
binary_data = open(file_path, 'rb')
media_response = issue_request(config, 'PUT', media_endpoint, media_headers, '', binary_data)
if is_remote and config['delete_tmp_upload'] is True:
containing_folder = os.path.join(config['input_dir'], re.sub('[^A-Za-z0-9]+', '_', node_csv_row[config['id_field']]))
shutil.rmtree(containing_folder)
if media_response.status_code == 201:
if 'location' in media_response.headers:
# A 201 response provides a 'location' header, but a '204' response does not.
media_uri = media_response.headers['location']
logging.info(
"Media (%s) created at %s, linked to node %s.",
media_type,
media_uri,
node_uri)
media_id = media_uri.rsplit('/', 1)[-1]
patch_media_fields(config, media_id, media_type, node_csv_row)
if media_type == 'image':
patch_image_alt_text(config, media_id, node_csv_row)
elif media_response.status_code == 204:
logging.warning(
"Media created and linked to node %s, but its URI is not available since its creation returned an HTTP status code of %s",
node_uri,
media_response.status_code)
logging.warning(
"Media linked to node %s base fields not updated.",
node_uri)
else:
logging.error(
'Media not created, PUT request to "%s" returned an HTTP status code of "%s".',
media_endpoint,
media_response.status_code)
binary_data.close()
return media_response.status_code
def patch_media_fields(config, media_id, media_type, node_csv_row):
"""Patch the media entity with base fields from the parent node.
"""
media_json = {
'bundle': [
{'target_id': media_type}
]
}
for field_name, field_value in node_csv_row.items():
if field_name == 'created' and len(field_value) > 0:
media_json['created'] = [{'value': field_value}]
if field_name == 'uid' and len(field_value) > 0:
media_json['uid'] = [{'target_id': field_value}]
if len(media_json) > 1:
endpoint = config['host'] + '/media/' + media_id + '?_format=json'
headers = {'Content-Type': 'application/json'}
response = issue_request(config, 'PATCH', endpoint, headers, media_json)
if response.status_code == 200:
logging.info(
"Media %s fields updated to match parent node's.", config['host'] + '/media/' + media_id)
else:
logging.warning(
"Media %s fields not updated to match parent node's.", config['host'] + '/media/' + media_id)
def patch_image_alt_text(config, media_id, node_csv_row):
"""Patch the alt text value for an image media. Use the parent node's title
unless the CSV record contains an image_alt_text field with something in it.
"""
get_endpoint = config['host'] + '/media/' + media_id + '?_format=json'
get_headers = {'Content-Type': 'application/json'}
get_response = issue_request(config, 'GET', get_endpoint, get_headers)
get_response_body = json.loads(get_response.text)
field_media_image_target_id = get_response_body['field_media_image'][0]['target_id']
for field_name, field_value in node_csv_row.items():
if field_name == 'title':
# Strip out HTML markup to guard against CSRF in alt text.
alt_text = re.sub('<[^<]+?>', '', field_value)
if field_name == 'image_alt_text' and len(field_value) > 0:
alt_text = re.sub('<[^<]+?>', '', field_value)
media_json = {
'bundle': [
{'target_id': 'image'}
],
'field_media_image': [
{"target_id": field_media_image_target_id, "alt": alt_text}
],
}
patch_endpoint = config['host'] + '/media/' + media_id + '?_format=json'
patch_headers = {'Content-Type': 'application/json'}
patch_response = issue_request(
config,
'PATCH',
patch_endpoint,
patch_headers,
media_json)
if patch_response.status_code != 200:
logging.warning(
"Alt text for image media %s not updated.",
config['host'] + '/media/' + media_id)
def remove_media_and_file(config, media_id):
"""Delete a media and the file associated with it.
"""
# First get the media JSON.
get_media_url = '/media/' + str(media_id) + '?_format=json'
get_media_response = issue_request(config, 'GET', get_media_url)
get_media_response_body = json.loads(get_media_response.text)
# These are the Drupal field names on the various types of media.
file_fields = [
'field_media_file',
'field_media_image',
'field_media_document',
'field_media_audio_file',
'field_media_video_file']
for file_field_name in file_fields:
if file_field_name in get_media_response_body:
file_id = get_media_response_body[file_field_name][0]['target_id']
break
# Delete the file first.
file_endpoint = config['host'] + '/entity/file/' + str(file_id) + '?_format=json'
file_response = issue_request(config, 'DELETE', file_endpoint)
if file_response.status_code == 204:
logging.info("File %s (from media %s) deleted.", file_id, media_id)
else:
logging.error(
"File %s (from media %s) not deleted (HTTP response code %s).",
file_id,
media_id,
file_response.status_code)
# Then the media.
if file_response.status_code == 204:
media_endpoint = config['host'] + '/media/' + str(media_id) + '?_format=json'
media_response = issue_request(config, 'DELETE', media_endpoint)
if media_response.status_code == 204:
logging.info("Media %s deleted.", media_id)
return media_response.status_code
else:
logging.error(
"Media %s not deleted (HTTP response code %s).",
media_id,
media_response.status_code)
return False
return False
# @lru_cache(maxsize=None)
def get_csv_data(config):
"""Read the input CSV data and prepare it for use in create, update, etc. tasks.
This function reads the source CSV file (or the CSV dump from Google Sheets or Excel),
applies some preprocessing to each CSV record (specifically, it adds any CSV field
templates that are registered in the config file, and it filters out any CSV
records or lines in the CSV file that begin with a #), and finally, writes out
a version of the CSV data to a file that appends .prepocessed to the input
CSV file name. It is this .prepocessed file that is used in create, update, etc.
tasks.
"""
if os.path.isabs(config['input_csv']):
input_csv_path = config['input_csv']
elif config['input_csv'].startswith('http') is True:
input_csv_path = os.path.join(config['input_dir'], config['google_sheets_csv_filename'])
elif config['input_csv'].endswith('.xlsx') is True:
input_csv_path = os.path.join(config['input_dir'], config['excel_csv_filename'])
else:
input_csv_path = os.path.join(config['input_dir'], config['input_csv'])
if not os.path.exists(input_csv_path):
message = 'Error: CSV file ' + input_csv_path + ' not found.'
logging.error(message)
sys.exit(message)
try:
csv_reader_file_handle = open(input_csv_path, 'r', encoding="utf-8", newline='')
except (UnicodeDecodeError):
message = 'Error: CSV file ' + input_csv_path + ' must be encoded in ASCII or UTF-8.'
logging.error(message)
sys.exit(message)
csv_writer_file_handle = open(input_csv_path + '.prepocessed', 'w+', newline='')
csv_reader = csv.DictReader(csv_reader_file_handle, delimiter=config['delimiter'])
csv_reader_fieldnames = csv_reader.fieldnames
tasks = ['create', 'update']
if config['task'] in tasks and 'csv_field_templates' in config and len(config['csv_field_templates']) > 0:
# If the config file contains CSV field templates, append them to the CSV data.
# Make a copy of the column headers so we can skip adding templates to the new CSV
# if they're present in the source CSV. We don't want fields in the source CSV to be
# stomped on by templates.
csv_reader_fieldnames_orig = copy.copy(csv_reader_fieldnames)
for template in config['csv_field_templates']:
for field_name, field_value in template.items():
if field_name not in csv_reader_fieldnames_orig:
csv_reader_fieldnames.append(field_name)
csv_writer = csv.DictWriter(csv_writer_file_handle, fieldnames=csv_reader_fieldnames)
csv_writer.writeheader()
row_num = 0
unique_identifiers = []
for row in csv_reader:
row_num += 1
for template in config['csv_field_templates']:
for field_name, field_value in template.items():
if field_name not in csv_reader_fieldnames_orig:
row[field_name] = field_value
# Skip CSV records whose first column begins with #.
if not list(row.values())[0].startswith('#'):
try:
unique_identifiers.append(row[config['id_field']])
csv_writer.writerow(row)
except (ValueError):
message = "Error: Row " + str(row_num) + ' in your CSV file ' + \
"has more columns (" + str(len(row)) + ") than there are headers (" + \
str(len(csv_reader.fieldnames)) + ').'
logging.error(message)
sys.exit(message)
repeats = set(([x for x in unique_identifiers if unique_identifiers.count(x) > 1]))
if len(repeats) > 0:
message = "duplicated identifiers found: " + str(repeats)
logging.error(message)
sys.exit(message)
else:
csv_writer = csv.DictWriter(csv_writer_file_handle, fieldnames=csv_reader_fieldnames)
csv_writer.writeheader()
row_num = 0
for row in csv_reader:
row_num += 1
# Skip CSV records whose first column begins with #.
if not list(row.values())[0].startswith('#'):
try:
csv_writer.writerow(row)
except (ValueError):
message = "Error: Row " + str(row_num) + ' in your CSV file ' + \
"has more columns (" + str(len(row)) + ") than there are headers (" + \
str(len(csv_reader.fieldnames)) + ').'
logging.error(message)
sys.exit(message)
csv_writer_file_handle.close()
preprocessed_csv_reader_file_handle = open(input_csv_path + '.prepocessed', 'r')
preprocessed_csv_reader = csv.DictReader(preprocessed_csv_reader_file_handle, delimiter=config['delimiter'])
return preprocessed_csv_reader
def get_term_pairs(config, vocab_id):
"""Get all the term IDs plus associated term names in a vocabulary. If
the vocabulary does not exist, or is not registered with the view, the
request to Drupal returns a 200 plus an empty JSON list, i.e., [].
"""
term_dict = dict()
# Note: this URL requires the view "Terms in vocabulary", created by the
# Islandora Workbench Integration module, to be present on the target
# Islandora.
vocab_url = config['host'] + '/vocabulary/' + vocab_id + '?_format=json'
response = issue_request(config, 'GET', vocab_url)
vocab = json.loads(response.text)
for term in vocab:
name = term['name'][0]['value']
tid = term['tid'][0]['value']
term_dict[tid] = name
return term_dict
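# Illustrative return value from get_term_pairs() (the term IDs and names are hypothetical):
#   get_term_pairs(config, 'islandora_media_use')
#   -> {17: 'Original File', 18: 'Service File', 19: 'Thumbnail Image'}
# An empty dict is returned if the vocabulary does not exist or is not exposed by the View.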
def find_term_in_vocab(config, vocab_id, term_name_to_find):
"""For a given term name, loops through all term names in vocab_id
to see if term is there already. If so, returns term ID; if not
returns False.
"""
terms_in_vocab = get_term_pairs(config, vocab_id)
for tid, term_name in terms_in_vocab.items():
match = compare_strings(term_name, term_name_to_find)
if match:
return tid
# None matched.
return False
def get_term_id_from_uri(config, uri):
"""For a given URI, query the Term from URI View created by the Islandora
Workbench Integration module. Because we don't know which field each
taxonomy uses to store URIs (it's either field_external_uri or field_authority_link),
we need to check both options in the "Term from URI" View.
"""
# Some vocabularies use this View.
terms_with_uri = []
term_from_uri_url = config['host'] \
+ '/term_from_uri?_format=json&uri=' + uri.replace('#', '%23')
term_from_uri_response = issue_request(config, 'GET', term_from_uri_url)
if term_from_uri_response.status_code == 200:
term_from_uri_response_body_json = term_from_uri_response.text
term_from_uri_response_body = json.loads(
term_from_uri_response_body_json)
if len(term_from_uri_response_body) == 1:
tid = term_from_uri_response_body[0]['tid'][0]['value']
return tid
if len(term_from_uri_response_body) > 1:
for term in term_from_uri_response_body:
terms_with_uri.append(
{term['tid'][0]['value']: term['vid'][0]['target_id']})
tid = term_from_uri_response_body[0]['tid'][0]['value']
print("Warning: See log for important message about use of term URIs.")
logging.warning(
'Term URI "%s" is used for more than one term (with these term ID/vocabulary ID combinations: ' +
str(terms_with_uri) +
'). Workbench is choosing the first term ID (%s).',
uri,
tid)
return tid
# And some vocabularies use this View.
term_from_authority_link_url = config['host'] + \
'/term_from_authority_link?_format=json&authority_link=' + uri.replace('#', '%23')
term_from_authority_link_response = issue_request(
config, 'GET', term_from_authority_link_url)
if term_from_authority_link_response.status_code == 200:
term_from_authority_link_response_body_json = term_from_authority_link_response.text
term_from_authority_link_response_body = json.loads(
term_from_authority_link_response_body_json)
if len(term_from_authority_link_response_body) == 1:
tid = term_from_authority_link_response_body[0]['tid'][0]['value']
return tid
elif len(term_from_authority_link_response_body) > 1:
for term in term_from_authority_link_response_body:
terms_with_uri.append(
{term['tid'][0]['value']: term['vid'][0]['target_id']})
tid = term_from_authority_link_response_body[0]['tid'][0]['value']
print("Warning: See log for important message about use of term URIs.")
logging.warning(
'Term URI "%s" is used for more than one term (with these term ID/vocabulary ID combinations: ' +
str(terms_with_uri) +
'). Workbench is choosing the first term ID (%s)).',
uri,
tid)
return tid
else:
# URI does not match any term.
return False
# Non-200 response code.
return False
def create_term(config, vocab_id, term_name):
"""Adds a term to the target vocabulary. Returns the new term's ID
if successful (if the term already exists) or False if not.
"""
# Check to see if term exists; if so, return its ID, if not, proceed to
# create it.
tid = find_term_in_vocab(config, vocab_id, term_name)
if value_is_numeric(tid):
logging.info(
'Term "%s" (term ID %s) already exists in vocabulary "%s".',
term_name,
tid,
vocab_id)
return tid
if config['allow_adding_terms'] is False:
logging.warning(
'To create new taxonomy terms, you must add "allow_adding_terms: true" to your configuration file.')
return False
if len(term_name) > 255:
truncated_term_name = term_name[:255]
message = 'Term "' + term_name + '"' + \
"provided in the CSV data exceeds Drupal's maximum length of 255 characters."
message_2 = ' It has been truncated to "' + truncated_term_name + '".'
logging.info(message + message_2)
term_name = truncated_term_name
term = {
"vid": [
{
"target_id": vocab_id,
"target_type": "taxonomy_vocabulary"
}
],
"status": [
{
"value": True
}
],
"name": [
{
"value": term_name
}
],
"description": [
{
"value": "",
"format": None
}
],
"weight": [
{
"value": 0
}
],
"parent": [
{
"target_id": None
}
],
"default_langcode": [
{
"value": True
}
],
"path": [
{
"alias": None,
"pid": None,
"langcode": "en"
}
]
}
term_endpoint = config['host'] + '/taxonomy/term?_format=json'
headers = {'Content-Type': 'application/json'}
response = issue_request(
config,
'POST',
term_endpoint,
headers,
term,
None)
if response.status_code == 201:
term_response_body = json.loads(response.text)
tid = term_response_body['tid'][0]['value']
logging.info(
'Term %s ("%s") added to vocabulary "%s".',
tid,
term_name,
vocab_id)
return tid
else:
logging.warning(
"Term '%s' not created, HTTP response code was %s.",
term_name,
response.status_code)
return False
def create_url_alias(config, node_id, url_alias):
json = {'path': [
{'value': '/node/' + str(node_id)}
],
'alias': [
{'value': url_alias}
]
}
headers = {'Content-Type': 'application/json'}
response = issue_request(
config,
'POST',
config['host'] +
'/entity/path_alias?_format=json',
headers,
json,
None)
if response.status_code != 201:
logging.error(
"URL alias '%s' not created for node %s, HTTP response code was %s (it might already exist).",
url_alias,
config['host'] +
'/node/' +
node_id,
response.status_code)
def prepare_term_id(config, vocab_ids, term):
"""REST POST and PATCH operations require taxonomy term IDs, not term names. This
function checks its 'term' argument to see if it's numeric (i.e., a term ID) and
if it is, returns it as is. If it's not (i.e., a term name) it looks for the
term name in the referenced vocabulary and returns its term ID (existing or
newly created).
"""
term = str(term)
term = term.strip()
if value_is_numeric(term):
return term
# Special case: if the term starts with 'http', assume it's a Linked Data URI
# and get its term ID from the URI.
elif term.startswith('http'):
# Note: get_term_id_from_uri() will return False if the URI doesn't match
# a term.
tid_from_uri = get_term_id_from_uri(config, term)
if value_is_numeric(tid_from_uri):
return tid_from_uri
else:
if len(vocab_ids) == 1:
tid = create_term(config, vocab_ids[0].strip(), term.strip())
return tid
else:
# Term names used in multi-taxonomy fields. They need to be namespaced with
# the taxonomy ID.
#
# If the field has more than one vocabulary linked to it, we don't know which
# vocabulary the user wants a new term to be added to, and if the term name is
# already used in any of the taxonomies linked to this field, we also don't know
# which vocabulary to look for it in to get its term ID. Therefore, we always need
# to namespace term names if they are used in multi-taxonomy fields. If people want
# to use term names that contain a colon, they need to add them to Drupal first
# and use the term ID. Workaround PRs welcome.
#
# Split the namespace/vocab ID from the term name on ':'.
namespaced = re.search(':', term)
if namespaced:
[vocab_id, term_name] = term.split(':')
tid = create_term(config, vocab_id.strip(), term_name.strip())
return tid
def get_field_vocabularies(config, field_definitions, field_name):
"""Gets IDs of vocabularies linked from the current field (could be more than one).
"""
if 'vocabularies' in field_definitions[field_name]:
vocabularies = field_definitions[field_name]['vocabularies']
return vocabularies
else:
return False
def value_is_numeric(value):
"""Tests to see if value is numeric.
"""
var = str(value)
var = var.strip()
if var.isnumeric():
return True
else:
return False
def compare_strings(known, unknown):
"""Normalizes the unknown string and the known one, and compares
them. If they match, returns True, if not, False. We could
use FuzzyWuzzy or something but this is probably sufficient.
"""
# Strips leading and trailing whitespace.
known = known.strip()
unknown = unknown.strip()
# Converts to lower case.
known = known.lower()
unknown = unknown.lower()
# Remove all punctuation.
for p in string.punctuation:
known = known.replace(p, ' ')
unknown = unknown.replace(p, ' ')
# Replaces whitespace with a single space.
known = " ".join(known.split())
unknown = " ".join(unknown.split())
if unknown == known:
return True
else:
return False
def get_csv_record_hash(row):
"""Concatenate values in the CSV record and get an MD5 hash on the
resulting string.
"""
serialized_row = ''
for field in row:
if isinstance(row[field], str) or isinstance(row[field], int):
if isinstance(row[field], int):
row[field] = str(row[field])
row_value = row[field].strip()
row_value = " ".join(row_value.split())
serialized_row = serialized_row + row_value + " "
serialized_row = bytes(serialized_row.strip().lower(), 'utf-8')
hash_object = hashlib.md5(serialized_row)
return hash_object.hexdigest()
def validate_csv_field_cardinality(config, field_definitions, csv_data):
"""Compare values in the CSV data with the fields' cardinality. Log CSV
fields that have more values than allowed, and warn user if
these fields exist in their CSV data.
"""
field_cardinalities = dict()
csv_headers = csv_data.fieldnames
for csv_header in csv_headers:
if csv_header in field_definitions.keys():
cardinality = field_definitions[csv_header]['cardinality']
# We don't care about cardinality of -1 (unlimited).
if int(cardinality) > 0:
field_cardinalities[csv_header] = cardinality
for count, row in enumerate(csv_data, start=1):
for field_name in field_cardinalities.keys():
if field_name in row:
# Don't check for the subdelimiter in title.
if field_name == 'title':
continue
delimited_field_values = row[field_name].split(config['subdelimiter'])
if field_cardinalities[field_name] == 1 and len(delimited_field_values) > 1:
if config['task'] == 'create':
message = 'CSV field "' + field_name + '" in record with ID ' + \
row[config['id_field']] + ' contains more values than the number '
if config['task'] == 'update':
message = 'CSV field "' + field_name + '" in record with node ID ' \
+ row['node_id'] + ' contains more values than the number '
message_2 = 'allowed for that field (' + str(
field_cardinalities[field_name]) + '). Workbench will add only the first value.'
print('Warning: ' + message + message_2)
logging.warning(message + message_2)
if int(field_cardinalities[field_name]) > 1 and len(delimited_field_values) > field_cardinalities[field_name]:
if config['task'] == 'create':
message = 'CSV field "' + field_name + '" in record with ID ' + \
row[config['id_field']] + ' contains more values than the number '
if config['task'] == 'update':
message = 'CSV field "' + field_name + '" in record with node ID ' \
+ row['node_id'] + ' contains more values than the number '
message_2 = 'allowed for that field (' + str(
field_cardinalities[field_name]) + '). Workbench will add only the first ' + str(
field_cardinalities[field_name]) + ' values.'
print('Warning: ' + message + message_2)
logging.warning(message + message_2)
def validate_csv_field_length(config, field_definitions, csv_data):
"""Compare values in the CSV data with the fields' max_length. Log CSV
fields that exceed their max_length, and warn user if
these fields exist in their CSV data.
"""
field_max_lengths = dict()
csv_headers = csv_data.fieldnames
for csv_header in csv_headers:
if csv_header in field_definitions.keys():
if 'max_length' in field_definitions[csv_header]:
max_length = field_definitions[csv_header]['max_length']
# We don't care about max_length of None (i.e., it's
# not applicable or unlimited).
if max_length is not None:
field_max_lengths[csv_header] = max_length
for count, row in enumerate(csv_data, start=1):
for field_name in field_max_lengths.keys():
if field_name in row:
delimited_field_values = row[field_name].split(
config['subdelimiter'])
for field_value in delimited_field_values:
field_value_length = len(field_value)
if field_name in field_max_lengths and len(field_value) > int(field_max_lengths[field_name]):
if config['task'] == 'create':
message = 'CSV field "' + field_name + '" in record with ID ' + \
row[config['id_field']] + ' contains a value that is longer (' + str(len(field_value)) + ' characters)'
if config['task'] == 'update':
message = 'CSV field "' + field_name + '" in record with node ID ' + \
row['node_id'] + ' contains a value that is longer (' + str(len(field_value)) + ' characters)'
message_2 = ' than allowed for that field (' + str(
field_max_lengths[field_name]) + ' characters). Workbench will truncate this value prior to populating Drupal.'
print('Warning: ' + message + message_2)
logging.warning(message + message_2)
def validate_geolocation_fields(config, field_definitions, csv_data):
"""Validate lat,long values in fields that are of type 'geolocation'.
"""
geolocation_fields_present = False
for count, row in enumerate(csv_data, start=1):
for field_name in field_definitions.keys():
if field_definitions[field_name]['field_type'] == 'geolocation':
if field_name in row:
geolocation_fields_present = True
delimited_field_values = row[field_name].split(config['subdelimiter'])
for field_value in delimited_field_values:
if len(field_value.strip()):
if not validate_latlong_value(field_value.strip()):
message = 'Value in field "' + field_name + '" in row ' + str(count) + \
' (' + field_value + ') is not a valid lat,long pair.'
logging.error(message)
sys.exit('Error: ' + message)
if geolocation_fields_present is True:
message = "OK, geolocation field values in the CSV file validate."
print(message)
logging.info(message)
def validate_link_fields(config, field_definitions, csv_data):
"""Validate lat,long values in fields that are of type 'geolocation'.
"""
link_fields_present = False
for count, row in enumerate(csv_data, start=1):
for field_name in field_definitions.keys():
if field_definitions[field_name]['field_type'] == 'link':
if field_name in row:
link_fields_present = True
delimited_field_values = row[field_name].split(config['subdelimiter'])
for field_value in delimited_field_values:
if len(field_value.strip()):
if not validate_link_value(field_value.strip()):
message = 'Value in field "' + field_name + '" in row ' + str(count) + \
' (' + field_value + ') is not a valid link field value.'
logging.error(message)
sys.exit('Error: ' + message)
if link_fields_present is True:
message = "OK, link field values in the CSV file validate."
print(message)
logging.info(message)
def validate_latlong_value(latlong):
# Remove leading \ that may be present if input CSV is from a spreadsheet.
latlong = latlong.lstrip('\\')
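# Hedged illustration (values assumed): '+49.16,-123.93' and '49.16, -123.93'
# satisfy the pattern below, while '200,10' (latitude out of range) and
# '49.16;-123.93' (wrong separator) do not.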
if re.match(r"^[-+]?([1-8]?\d(\.\d+)?|90(\.0+)?),\s*[-+]?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$", latlong):
return True
else:
return False
def validate_link_value(link_value):
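# Hedged illustration (values assumed): 'http://www.example.com' passes, as does
# 'http://www.example.com%%Example Site' (the URL%%link-text form allowed by the
# pattern below); a bare 'www.example.com' without the http:// scheme does not.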
if re.match(r"^http://.+(%%.+)?$", link_value):
return True
else:
return False
def validate_term_name_length(term_name, row_number, column_name):
"""Checks that the length of a term name does not exceed
Drupal's 255 character length.
"""
term_name = term_name.strip()
if len(term_name) > 255:
message = 'CSV field "' + column_name + '" in record ' + row_number + \
" contains a taxonomy term that exceeds Drupal's limit of 255 characters (length of term is " + str(len(term_name)) + ' characters).'
message_2 = ' Term provided in CSV is "' + term_name + '".'
message_3 = " Please reduce the term's length to less than 256 characters."
logging.error(message + message_2 + message_3)
sys.exit(
'Error: ' +
message +
' See the Workbench log for more information.')
def validate_node_created_date(csv_data):
"""Checks that date_string is in the format used by Drupal's 'created' node property,
e.g., 2020-11-15T23:49:22+00:00. Also check to see if the date is in the future.
"""
for count, row in enumerate(csv_data, start=1):
for field_name, field_value in row.items():
if field_name == 'created' and len(field_value) > 0:
# matches = re.match(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d[+-]\d\d:\d\d$', field_value)
if not validate_node_created_date_string(field_value):
message = 'CSV field "created" in record ' + \
str(count) + ' contains a date "' + field_value + '" that is not formatted properly.'
logging.error(message)
sys.exit('Error: ' + message)
now = datetime.datetime.now()
# Remove the GMT differential at the end of the time string.
date_string_trimmed = re.sub(
r'[+-]\d\d:\d\d$', '', field_value)
created_date = datetime.datetime.strptime(
date_string_trimmed, '%Y-%m-%dT%H:%M:%S')
if created_date > now:
message = 'CSV field "created" in record ' + \
str(count) + ' contains a date "' + field_value + '" that is in the future.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, dates in the "created" CSV field are all formated correctly and in the future.'
print(message)
logging.info(message)
def validate_node_created_date_string(created_date_string):
if re.match(r"^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d[+-]\d\d:\d\d$", created_date_string):
return True
else:
return False
def validate_edtf_fields(config, field_definitions, csv_data):
"""Validate values in fields that are of type 'edtf'.
"""
edtf_fields_present = False
for count, row in enumerate(csv_data, start=1):
for field_name in field_definitions.keys():
if field_definitions[field_name]['field_type'] == 'edtf':
if field_name in row:
edtf_fields_present = True
delimited_field_values = row[field_name].split(config['subdelimiter'])
for field_value in delimited_field_values:
if len(field_value.strip()):
result, validation_message = validate_edtf_value(field_value)
if result is False:
message = 'Value in field "' + field_name + '" in row ' + str(count) + \
' ("' + field_value + '") is not a valid EDTF date/time.' + ' ' + validation_message
logging.error(message)
sys.exit('Error: ' + message)
if edtf_fields_present is True:
message = "OK, ETDF field values in the CSV file validate."
print(message)
logging.info(message)
def validate_edtf_value(edtf):
edtf = edtf.strip()
# Value contains an EDTF interval, e.g. ‘1964/2008’
if '/' in edtf:
interval_dates = edtf.split('/', 1)
for interval_date in interval_dates:
result, message = validate_single_edtf_date(interval_date)
if result is False:
return False, 'Interval date "' + interval_date + '"" does not validate.' + ' ' + message
# If we've made it this far, return True.
return True, None
# Value is an EDTF set if it contains a , or .., so it must start with a [ and end with a ].
elif edtf.count('.') == 2 or ',' in edtf:
if not (edtf.startswith('[') and edtf.endswith(']')):
return False, 'Date set "' + edtf + '" does not contain a leading [ and/or trailing ].'
# Value contains an EDTF set, e.g. '[1667,1668,1670..1672]'.
if '[' in edtf:
edtf = edtf.lstrip('[')
edtf = edtf.rstrip(']')
if '..' in edtf or ',' in edtf:
# .. is at beginning of set, e.g. ..1760-12-03
if edtf.startswith('..'):
edtf = edtf.lstrip('..')
result, message = validate_single_edtf_date(edtf)
if result is False:
return False, 'Set date "' + edtf + '"" does not validate.' + ' ' + message
else:
return True, None
if edtf.endswith('..'):
edtf = edtf.rstrip('..')
result, message = validate_single_edtf_date(edtf)
if result is False:
return False, 'Set date "' + edtf + '"" does not validate.' + ' ' + message
else:
return True, None
set_date_boundaries = re.split(r'\.\.|,', edtf)
for set_date_boundary in set_date_boundaries:
result, message = validate_single_edtf_date(set_date_boundary)
if result is False:
return False, 'Set date "' + set_date_boundary + '"" does not validate.' + ' ' + message
# If we've made it this far, return True.
return True, None
# Assume value is just a single EDTF date.
else:
result, message = validate_single_edtf_date(edtf)
if result is False:
return False, 'EDTF date "' + edtf + '"" does not validate.' + ' ' + message
else:
return True, None
def validate_single_edtf_date(single_edtf):
if 'T' in single_edtf:
# if re.search(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$', single_edtf):
if re.search(r'^\d\d\d\d-\d\d-\d\d(T\d\d:\d\d:\d\d)?$', single_edtf):
return True, None
else:
return False, '"' + single_edtf + '" is an invalid EDTF date and local time value.'
if re.search(r'%|\?|~', single_edtf):
parts = single_edtf.split('-')
if parts[0] is not None and re.search('~|%', parts[0]):
return False, 'Invalid date qualifier in "' + parts[0] + ", must be a ?."
if len(parts) == 2 and re.search(r'\?|%', parts[1]):
return False, 'Invalid date qualifier in "' + parts[1] + ", must be a ~."
if len(parts) == 3 and re.search(r'\?|~', parts[2]):
return False, 'Invalid date qualifier in "' + parts[2] + ", must be a %."
for symbol in '%~?':
single_edtf = single_edtf.replace(symbol, '')
if re.search(r'^\d{4}-?(\d\d)?-?(\d\d)?$', single_edtf):
valid_calendar_date = validate_calendar_date(single_edtf)
if valid_calendar_date is False:
return False, '"' + single_edtf + '" is not a valid calendar date.'
return True, None
else:
return False, single_edtf + " is not a valid EDTF date value."
def validate_calendar_date(date_to_validate):
"""Checks to see if date (yyyy, yyy-mm, or yyyy-mm-dd) is a
valid Gregorian calendar date.
"""
parts = str(date_to_validate).split('-')
if len(parts) == 3:
year = parts[0]
month = parts[1]
day = parts[2]
if len(parts) == 2:
year = parts[0]
month = parts[1]
day = 1
if len(parts) == 1:
year = parts[0]
month = 1
day = 1
try:
datetime.date(int(year), int(month), int(day))
return True
except ValueError:
return False
def validate_url_aliases(config, csv_data):
"""Checks that URL aliases don't already exist.
"""
for count, row in enumerate(csv_data, start=1):
for field_name, field_value in row.items():
if field_name == 'url_alias' and len(field_value) > 0:
if field_value.strip()[0] != '/':
message = 'CSV field "url_alias" in record ' + \
str(count) + ' contains an alias "' + field_value + '" that is missing its leading /.'
logging.error(message)
sys.exit('Error: ' + message)
alias_ping = ping_url_alias(config, field_value)
# @todo: Add 301 and 302 as acceptable status codes?
if alias_ping == 200:
message = 'CSV field "url_alias" in record ' + \
str(count) + ' contains an alias "' + field_value + '" that already exists.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, URL aliases do not already exist.'
print(message)
logging.info(message)
def validate_node_uid(config, csv_data):
"""Checks that the user identified in the 'uid' field exists in Drupal. Note that
this does not validate any permissions the user may have.
"""
for count, row in enumerate(csv_data, start=1):
for field_name, field_value in row.items():
if field_name == 'uid' and len(field_value) > 0:
# Request to /user/x?_format=json goes here; 200 means the user
# exists, 404 means they do not.
uid_url = config['host'] + '/user/' + \
str(field_value) + '?_format=json'
uid_response = issue_request(config, 'GET', uid_url)
if uid_response.status_code == 404:
message = 'CSV field "uid" in record ' + \
str(count) + ' contains a user ID "' + field_value + '" that does not exist in the target Drupal.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, user IDs in the "uid" CSV field all exist.'
print(message)
logging.info(message)
def validate_taxonomy_field_values(config, field_definitions, csv_data):
"""Loop through all fields in field_definitions, and if a field
is a taxonomy reference field, validate all values in the CSV
data in that field against term IDs in the taxonomies referenced
by the field. Does not validate Typed Relation fields
(see validate_typed_relation_field_values()).
"""
# Define a dictionary to store CSV field: term IDs mappings.
fields_with_vocabularies = dict()
vocab_validation_issues = False
# Get all the term IDs for vocabularies referenced in all fields in the CSV.
for column_name in csv_data.fieldnames:
if column_name in field_definitions:
if field_definitions[column_name]['field_type'] == 'typed_relation':
continue
if 'vocabularies' in field_definitions[column_name]:
vocabularies = get_field_vocabularies(config, field_definitions, column_name)
# If there are no vocabularies linked to the current field, 'vocabularies'
# will be False and will throw a TypeError.
try:
num_vocabs = len(vocabularies)
except BaseException:
message = 'Workbench cannot get vocabularies linked to field "' + \
column_name + '". Please confirm that field has at least one vocabulary.'
logging.error(message)
sys.exit('Error: ' + message)
all_tids_for_field = []
for vocabulary in vocabularies:
terms = get_term_pairs(config, vocabulary)
if len(terms) == 0:
if config['allow_adding_terms'] is True:
vocab_validation_issues = True
message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
'" may not be enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
logging.warning(message)
else:
vocab_validation_issues = True
message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
'" may not enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
logging.warning(message)
vocab_term_ids = list(terms.keys())
# If more than one vocab in this field, combine their term IDs into a single list.
all_tids_for_field = all_tids_for_field + vocab_term_ids
fields_with_vocabularies.update({column_name: all_tids_for_field})
# If none of the CSV fields are taxonomy reference fields, return.
if len(fields_with_vocabularies) == 0:
return
# Iterate through the CSV and validate each taxonomy fields's values.
new_term_names_in_csv_results = []
for count, row in enumerate(csv_data, start=1):
for column_name in fields_with_vocabularies:
if len(row[column_name]):
new_term_names_in_csv = validate_taxonomy_reference_value(config, field_definitions, fields_with_vocabularies, column_name, row[column_name], count)
new_term_names_in_csv_results.append(new_term_names_in_csv)
if True in new_term_names_in_csv_results and config['allow_adding_terms'] is True:
print("OK, term IDs/names in CSV file exist in their respective taxonomies (and new terms will be created as noted in the Workbench log).")
else:
# All term IDs are in their field's vocabularies.
print("OK, term IDs/names in CSV file exist in their respective taxonomies.")
logging.info("OK, term IDs/names in CSV file exist in their respective taxonomies.")
return vocab_validation_issues
def validate_typed_relation_field_values(config, field_definitions, csv_data):
"""Validate values in fields that are of type 'typed_relation'. Each CSV
value must have this pattern: "string:string:int" or "string:string:string".
If the last segment is a string, it must be a term name, a namespaced term name,
or an http URI.
"""
# Define a dictionary to store CSV field: term IDs mappings.
fields_with_vocabularies = dict()
# Get all the term IDs for vocabularies referenced in all fields in the CSV.
vocab_validation_issues = False
for column_name in csv_data.fieldnames:
if column_name in field_definitions:
if 'vocabularies' in field_definitions[column_name]:
vocabularies = get_field_vocabularies(config, field_definitions, column_name)
# If there are no vocabularies linked to the current field, 'vocabularies'
# will be False and will throw a TypeError.
try:
num_vocabs = len(vocabularies)
except BaseException:
message = 'Workbench cannot get vocabularies linked to field "' + \
column_name + '". Please confirm that field has at least one vocabulary.'
logging.error(message)
sys.exit('Error: ' + message)
all_tids_for_field = []
for vocabulary in vocabularies:
terms = get_term_pairs(config, vocabulary)
if len(terms) == 0:
if config['allow_adding_terms'] is True:
vocab_validation_issues = True
message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
'" may not be enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
logging.warning(message)
else:
vocab_validation_issues = True
message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
'" may not enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
logging.warning(message)
vocab_term_ids = list(terms.keys())
# If more than one vocab in this field, combine their term IDs into a single list.
all_tids_for_field = all_tids_for_field + vocab_term_ids
fields_with_vocabularies.update({column_name: all_tids_for_field})
# If none of the CSV fields are taxonomy reference fields, return.
if len(fields_with_vocabularies) == 0:
return
typed_relation_fields_present = False
new_term_names_in_csv_results = []
for count, row in enumerate(csv_data, start=1):
for field_name in field_definitions.keys():
if field_definitions[field_name]['field_type'] == 'typed_relation' and 'typed_relations' in field_definitions[field_name]:
if field_name in row:
typed_relation_fields_present = True
delimited_field_values = row[field_name].split(config['subdelimiter'])
for field_value in delimited_field_values:
if len(field_value) == 0:
continue
# First check the required patterns.
if not re.match("^[a-zA-Z]+:[a-zA-Z]+:.+$", field_value.strip()):
message = 'Value in field "' + field_name + '" in row ' + str(count) + \
' (' + field_value + ') does not use the pattern required for typed relation fields.'
logging.error(message)
sys.exit('Error: ' + message)
# Then, check to see if the relator string (the first two parts of the
# value) exist in the field_definitions[fieldname]['typed_relations'] list.
typed_relation_value_parts = field_value.split(':', 2)
relator_string = typed_relation_value_parts[0] + ':' + typed_relation_value_parts[1]
if relator_string not in field_definitions[field_name]['typed_relations']:
message = 'Value in field "' + field_name + '" in row ' + str(count) + \
' contains a relator (' + relator_string + ') that is not configured for that field.'
logging.error(message)
sys.exit('Error: ' + message)
# Iterate through the CSV and validate the taxonomy term/name/URI in each field subvalue.
for column_name in fields_with_vocabularies:
if len(row[column_name]):
delimited_field_values = row[column_name].split(config['subdelimiter'])
delimited_field_values_without_relator_strings = []
for field_value in delimited_field_values:
# Strip the relator string out from field_value, leaving the vocabulary ID and term ID/name/URI.
term_to_check = re.sub('^[a-zA-Z]+:[a-zA-Z]+:', '', field_value)
delimited_field_values_without_relator_strings.append(term_to_check)
field_value_to_check = config['subdelimiter'].join(delimited_field_values_without_relator_strings)
new_term_names_in_csv = validate_taxonomy_reference_value(config, field_definitions, fields_with_vocabularies, column_name, field_value_to_check, count)
new_term_names_in_csv_results.append(new_term_names_in_csv)
if typed_relation_fields_present is True and True in new_term_names_in_csv_results and config['allow_adding_terms'] is True:
print("OK, term IDs/names used in typed relation fields in the CSV file exist in their respective taxonomies (and new terms will be created as noted in the Workbench log).")
else:
if typed_relation_fields_present is True:
# All term IDs are in their field's vocabularies.
print("OK, term IDs/names used in typed relation fields in the CSV file exist in their respective taxonomies.")
logging.info("OK, term IDs/names used in typed relation fields in the CSV file exist in their respective taxonomies.")
return vocab_validation_issues
def validate_taxonomy_reference_value(config, field_definitions, fields_with_vocabularies, csv_field_name, csv_field_value, record_number):
this_fields_vocabularies = get_field_vocabularies(config, field_definitions, csv_field_name)
this_fields_vocabularies_string = ', '.join(this_fields_vocabularies)
new_term_names_in_csv = False
# Allow for multiple values in one field.
terms_to_check = csv_field_value.split(config['subdelimiter'])
for field_value in terms_to_check:
# If this is a multi-taxonomy field, all term names must be namespaced
# using the vocab_id:term_name pattern, regardless of whether
# config['allow_adding_terms'] is True.
if len(this_fields_vocabularies) > 1 and value_is_numeric(field_value) is not True and not field_value.startswith('http'):
# URIs are unique so don't need namespacing.
split_field_values = field_value.split(config['subdelimiter'])
for split_field_value in split_field_values:
namespaced = re.search(':', field_value)
if namespaced:
# If the : is present, validate that the namespace is one of
# the vocabulary IDs referenced by this field.
field_value_parts = field_value.split(':')
if field_value_parts[0] not in this_fields_vocabularies:
message = 'Vocabulary ID ' + field_value_parts[0] + \
' used in CSV column "' + csv_field_name + '", row ' + str(record_number) + \
' does not match any of the vocabularies referenced by the' + \
' corresponding Drupal field (' + this_fields_vocabularies_string + ').'
logging.error(message)
sys.exit('Error: ' + message)
else:
message = 'Term names in multi-vocabulary CSV field "' + \
csv_field_name + '" require a vocabulary namespace; value '
message_2 = '"' + field_value + '" in row ' \
+ str(record_number) + ' does not have one.'
logging.error(message + message_2)
sys.exit('Error: ' + message + message_2)
validate_term_name_length(split_field_value, str(record_number), csv_field_name)
# Check to see if field_value is a member of the field's vocabularies. First,
# check whether field_value is a term ID.
if value_is_numeric(field_value):
field_value = field_value.strip()
if int(field_value) not in fields_with_vocabularies[csv_field_name]:
message = 'CSV field "' + csv_field_name + '" in row ' + \
str(record_number) + ' contains a term ID (' + field_value + ') that is '
if len(this_fields_vocabularies) > 1:
message_2 = 'not in one of the referenced vocabularies (' \
+ this_fields_vocabularies_string + ').'
else:
message_2 = 'not in the referenced vocabulary ("' + \
this_fields_vocabularies[0] + '").'
logging.error(message + message_2)
sys.exit('Error: ' + message + message_2)
# Then check values that are URIs.
elif field_value.startswith('http'):
tid_from_uri = get_term_id_from_uri(config, field_value)
if value_is_numeric(tid_from_uri):
if tid_from_uri not in fields_with_vocabularies[csv_field_name]:
message = 'CSV field "' + csv_field_name + '" in row ' + \
str(record_number) + ' contains a term URI (' + field_value + ') that is '
if len(this_fields_vocabularies) > 1:
message_2 = 'not in one of the referenced vocabularies (' \
+ this_fields_vocabularies_string + ').'
else:
message_2 = 'not in the referenced vocabulary ("' \
+ this_fields_vocabularies[0] + '").'
logging.error(message + message_2)
sys.exit('Error: ' + message + message_2)
else:
message = 'Term URI "' + field_value + '" used in CSV column "' + \
csv_field_name + '"" row ' + str(record_number) + ' does not match any terms.'
logging.error(message)
sys.exit('Error: ' + message)
# Finally, check values that are string term names.
else:
new_terms_to_add = []
for vocabulary in this_fields_vocabularies:
tid = find_term_in_vocab(config, vocabulary, field_value)
if value_is_numeric(tid) is not True:
# Single taxonomy fields.
if len(this_fields_vocabularies) == 1:
if config['allow_adding_terms'] is True:
# Warn if the term name is not in the field's vocabulary.
if tid is False:
new_term_names_in_csv = True
validate_term_name_length(field_value, str(record_number), csv_field_name)
message = 'CSV field "' + csv_field_name + '" in row ' + \
str(record_number) + ' contains a term ("' + field_value.strip() + '") that is '
message_2 = 'not in the referenced vocabulary ("' \
+ this_fields_vocabularies[0] + '"). That term will be created.'
logging.warning(message + message_2)
else:
new_term_names_in_csv = True
message = 'CSV field "' + csv_field_name + '" in row ' + \
str(record_number) + ' contains a term ("' + field_value.strip() + '") that is '
message_2 = 'not in the referenced vocabulary ("' + this_fields_vocabularies[0] + '").'
logging.error(message + message_2)
sys.exit('Error: ' + message + message_2)
# If this is a multi-taxonomy field, all term names must be namespaced using
# the vocab_id:term_name pattern, regardless of whether
# config['allow_adding_terms'] is True.
if len(this_fields_vocabularies) > 1:
split_field_values = field_value.split(config['subdelimiter'])
for split_field_value in split_field_values:
# Check to see if the namespaced vocab is referenced by this field.
[namespace_vocab_id, namespaced_term_name] = split_field_value.split(':', 1)
if namespace_vocab_id not in this_fields_vocabularies:
message = 'CSV field "' + csv_field_name + '" in row ' \
+ str(record_number) + ' contains a namespaced term name '
message_2 = '("' + namespaced_term_name.strip(
) + '") that specifies a vocabulary not associated with that field.'
logging.error(message + message_2)
sys.exit('Error: ' + message + message_2)
tid = find_term_in_vocab(config, namespace_vocab_id, namespaced_term_name)
# Warn if namespaced term name is not in specified vocab.
if config['allow_adding_terms'] is True:
if tid is False and split_field_value not in new_terms_to_add:
new_term_names_in_csv = True
message = 'CSV field "' + csv_field_name + '" in row ' + \
str(record_number) + ' contains a term ("' + namespaced_term_name.strip() + '") that is '
message_2 = 'not in the referenced vocabulary ("' \
+ namespace_vocab_id + '"). That term will be created.'
logging.warning(message + message_2)
new_terms_to_add.append(split_field_value)
validate_term_name_length(split_field_value, str(record_number), csv_field_name)
# Exit if the namespaced term name is not in the specified vocab.
else:
if tid is False:
message = 'CSV field "' + csv_field_name + '" in row ' + \
str(record_number) + ' contains a term ("' + namespaced_term_name.strip() + '") that is '
message_2 = 'not in the referenced vocabulary ("' \
+ namespace_vocab_id + '").'
logging.error(message + message_2)
sys.exit('Error: ' + message + message_2)
return new_term_names_in_csv
def write_to_output_csv(config, id, node_json):
"""Appends a row to the CVS file located at config['output_csv'].
"""
if config['task'] == 'create_from_files':
config['id_field'] = 'ID'
node_dict = json.loads(node_json)
node_field_names = list(node_dict.keys())
node_field_names.insert(0, 'node_id')
node_field_names.insert(0, config['id_field'])
# Don't include these Drupal fields in our output.
fields_to_remove = [
'nid',
'vid',
'created',
'changed',
'langcode',
'default_langcode',
'uid',
'type',
'revision_timestamp',
'revision_translation_affected',
'revision_uid',
'revision_log',
'content_translation_source',
'content_translation_outdated']
for field_to_remove in fields_to_remove:
node_field_names.remove(field_to_remove)
csvfile = open(config['output_csv'], 'a+')
writer = csv.DictWriter(csvfile, fieldnames=node_field_names, lineterminator="\n")
# Check for presence of header row, don't add it if it's already there.
with open(config['output_csv']) as f:
first_line = f.readline()
if not first_line.startswith(config['id_field']):
writer.writeheader()
# Assemble the CSV record to write.
row = dict()
row[config['id_field']] = id
row['node_id'] = node_dict['nid'][0]['value']
row['uuid'] = node_dict['uuid'][0]['value']
row['title'] = node_dict['title'][0]['value']
row['status'] = node_dict['status'][0]['value']
writer.writerow(row)
csvfile.close()
def create_children_from_directory(config, parent_csv_record, parent_node_id, parent_title):
# These objects will have a title (derived from filename), an ID based on the parent's
# id, and a config-defined Islandora model. Content type and status are inherited
# as is from the parent. The weight assigned to the page is the last segment in the filename,
# split from the rest of the filename using the character defined in the
# 'paged_content_sequence_seprator' config option.
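# Illustrative filename handling (assumed example, with the separator assumed
# to be '_'): a page file named 'issue-5_004.tif' in the parent's input
# subdirectory yields weight '4' and a title like 'Parent title, page 4'.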
parent_id = parent_csv_record[config['id_field']]
page_dir_path = os.path.join(config['input_dir'], parent_id)
page_files = os.listdir(page_dir_path)
page_file_return_dict = dict()
for page_file_name in page_files:
filename_without_extension = os.path.splitext(page_file_name)[0]
filename_segments = filename_without_extension.split(
config['paged_content_sequence_seprator'])
weight = filename_segments[-1]
weight = weight.lstrip("0")
# @todo: come up with a templated way to generate the page_identifier,
# and what field to POST it to.
page_identifier = parent_id + '_' + filename_without_extension
page_title = parent_title + ', page ' + weight
# @todo: provide a config option for page content type.
node_json = {
'type': [
{'target_id': config['paged_content_page_content_type'],
'target_type': 'node_type'}
],
'title': [
{'value': page_title}
],
'status': [
{'value': config['published']}
],
'field_model': [
{'target_id': config['paged_content_page_model_tid'],
'target_type': 'taxonomy_term'}
],
'field_member_of': [
{'target_id': parent_node_id,
'target_type': 'node'}
],
'field_weight': [
{'value': weight}
]
}
if 'field_display_hints' in parent_csv_record:
node_json['field_display_hints'] = [{'target_id': parent_csv_record['field_display_hints'], 'target_type': 'taxonomy_term'}]
# Some optional base fields, inherited from the parent object.
if 'uid' in parent_csv_record:
if len(parent_csv_record['uid']) > 0:
node_json['uid'] = [{'target_id': parent_csv_record['uid']}]
if 'created' in parent_csv_record:
if len(parent_csv_record['created']) > 0:
node_json['created'] = [
{'value': parent_csv_record['created']}]
node_headers = {
'Content-Type': 'application/json'
}
node_endpoint = '/node?_format=json'
node_response = issue_request(
config,
'POST',
node_endpoint,
node_headers,
node_json,
None)
if node_response.status_code == 201:
node_uri = node_response.headers['location']
print('+ Node for child "' + page_title + '" created at ' + node_uri + '.')
logging.info('Node for child "%s" created at %s.', page_title, node_uri)
if 'output_csv' in config.keys():
write_to_output_csv(config, page_identifier, node_response.text)
node_nid = node_uri.rsplit('/', 1)[-1]
write_rollback_node_id(config, node_nid)
page_file_path = os.path.join(parent_id, page_file_name)
fake_csv_record = collections.OrderedDict()
fake_csv_record['title'] = page_title
media_response_status_code = create_media(config, page_file_path, node_uri, fake_csv_record)
allowed_media_response_codes = [201, 204]
if media_response_status_code in allowed_media_response_codes:
logging.info("Media for %s created.", page_file_path)
else:
logging.warning('Node for page "%s" not created, HTTP response code was %s.', page_identifier, node_response.status_code)
def write_rollback_config(config):
path_to_rollback_config_file = os.path.join('rollback.yml')
rollback_config_file = open(path_to_rollback_config_file, "w")
yaml.dump(
{'task': 'delete',
'host': config['host'],
'username': config['username'],
'password': config['password'],
'input_dir': config['input_dir'],
'input_csv': 'rollback.csv'},
rollback_config_file)
def prep_rollback_csv(config):
path_to_rollback_csv_file = os.path.join(
config['input_dir'], 'rollback.csv')
if os.path.exists(path_to_rollback_csv_file):
os.remove(path_to_rollback_csv_file)
rollback_csv_file = open(path_to_rollback_csv_file, "a+")
rollback_csv_file.write("node_id" + "\n")
rollback_csv_file.close()
def write_rollback_node_id(config, node_id):
path_to_rollback_csv_file = os.path.join(
config['input_dir'], 'rollback.csv')
rollback_csv_file = open(path_to_rollback_csv_file, "a+")
rollback_csv_file.write(node_id + "\n")
rollback_csv_file.close()
def get_csv_from_google_sheet(config):
url_parts = config['input_csv'].split('/')
url_parts[6] = 'export?gid=' + str(config['google_sheets_gid']) + '&format=csv'
csv_url = '/'.join(url_parts)
response = requests.get(url=csv_url, allow_redirects=True)
if response.status_code == 404:
message = 'Workbench cannot find the Google spreadsheet at ' + config['input_csv'] + '. Please check the URL.'
logging.error(message)
sys.exit('Error: ' + message)
# Sheets that aren't publicly readable return a 302 and then a 200 with a bunch of HTML for humans to look at.
if response.content.strip().startswith(b'<!DOCTYPE'):
message = 'The Google spreadsheet at ' + config['input_csv'] + ' is not accessible.\nPlease check its "Share" settings.'
logging.error(message)
sys.exit('Error: ' + message)
input_csv_path = os.path.join(config['input_dir'], config['google_sheets_csv_filename'])
open(input_csv_path, 'wb+').write(response.content)
def get_csv_from_excel(config):
"""Read the input Excel 2010 (or later) file and write it out as CSV.
"""
if os.path.isabs(config['input_csv']):
input_excel_path = config['input_csv']
else:
input_excel_path = os.path.join(config['input_dir'], config['input_csv'])
if not os.path.exists(input_excel_path):
message = 'Error: Excel file ' + input_excel_path + ' not found.'
logging.error(message)
sys.exit(message)
excel_file_path = config['input_csv']
wb = openpyxl.load_workbook(filename=input_excel_path)
ws = wb[config['excel_worksheet']]
headers = []
header_row = ws[1]
ws.delete_rows(0)
for header_cell in header_row:
headers.append(header_cell.value)
records = []
for row in ws:
record = {}
for x in range(len(header_row)):
if headers[x] is not None and row[x] is not None:
record[headers[x]] = row[x].value
records.append(record)
input_csv_path = os.path.join(config['input_dir'], config['excel_csv_filename'])
csv_writer_file_handle = open(input_csv_path, 'w+', newline='')
csv_writer = csv.DictWriter(csv_writer_file_handle, fieldnames=headers)
csv_writer.writeheader()
for record in records:
if (config['id_field'] in record or 'node_id' in record) and record[config['id_field']] is not None:
csv_writer.writerow(record)
csv_writer_file_handle.close()
def download_remote_file(config, url, node_csv_row):
sections = urllib.parse.urlparse(url)
try:
response = requests.get(url, allow_redirects=True)
except requests.exceptions.Timeout as err_timeout:
message = 'Workbench timed out trying to reach ' + \
sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
logging.error(message)
logging.error(err_timeout)
print('Error: ' + message)
except requests.exceptions.ConnectionError as error_connection:
message = 'Workbench cannot connect to ' + \
sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
logging.error(message)
logging.error(error_connection)
print('Error: ' + message)
# create_media() references the path of the downloaded file.
subdir = os.path.join(config['input_dir'], re.sub('[^A-Za-z0-9]+', '_', node_csv_row[config['id_field']]))
Path(subdir).mkdir(parents=True, exist_ok=True)
if config["use_node_title_for_media"]:
filename = re.sub('[^A-Za-z0-9]+', '_', node_csv_row['title'])
if filename[-1] == '_':
filename = filename[:-1]
downloaded_file_path = os.path.join(subdir, filename)
file_extension = os.path.splitext(downloaded_file_path)[1]
else:
downloaded_file_path = os.path.join(subdir, url.split("/")[-1])
file_extension = os.path.splitext(url)[1]
f = open(downloaded_file_path, 'wb+')
f.write(response.content)
f.close()
mime = magic.from_file(downloaded_file_path, mime=True)
ext = mimetypes.guess_extension(mime)
if ext == '.jpe':
ext = '.jpg'
if file_extension == '':
os.rename(downloaded_file_path, downloaded_file_path + ext)
downloaded_file_path = downloaded_file_path + ext
return downloaded_file_path
def get_csv_template(config, args):
field_definitions = get_field_definitions(config)
field_labels = collections.OrderedDict()
field_labels['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'LABEL (REMOVE THIS ROW)'
for field_name in field_definitions:
if field_definitions[field_name]['label'] != '':
field_labels[field_name] = field_definitions[field_name]['label']
else:
field_labels[field_name] = ''
required = collections.OrderedDict()
required['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'REQUIRED IN CREATE TASKS (REMOVE THIS ROW)'
for field_name in field_definitions:
if field_definitions[field_name]['required'] != '':
if field_definitions[field_name]['required'] is True:
required[field_name] = 'Yes'
else:
required[field_name] = 'No'
required['title'] = 'Yes'
required['uid'] = 'No'
required['langcode'] = 'No'
required['created'] = 'No'
required[config['id_field']] = 'Yes'
if config['nodes_only'] is True:
required['file'] = 'Yes'
else:
required['file'] = 'No'
mapping = dict()
mapping['string'] = 'Free text'
mapping['string_long'] = 'Free text'
mapping['text'] = 'Free text'
mapping['text_long'] = 'Free text'
mapping['geolocation'] = '+49.16,-123.93'
mapping['entity_reference'] = '100 [or term name or http://foo.com/someuri]'
mapping['edtf'] = '2020-10-28'
mapping['typed_relation'] = 'relators:art:30'
mapping['integer'] = 100
sample_data = collections.OrderedDict()
sample_data['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'SAMPLE DATA (REMOVE THIS ROW)'
sample_data[config['id_field']] = '0001'
sample_data['file'] = 'myimage.jpg'
sample_data['uid'] = '21'
sample_data['langcode'] = 'fr'
sample_data['created'] = '2020-11-15T23:49:22+00:00'
sample_data['title'] = 'Free text'
for field_name in field_definitions:
if field_definitions[field_name]['field_type'] in mapping:
sample_data[field_name] = mapping[field_definitions[field_name]['field_type']]
else:
sample_data[field_name] = ''
csv_file_path = os.path.join(config['input_dir'], config['input_csv'] + '.csv_file_template')
csv_file = open(csv_file_path, 'a+')
writer = csv.DictWriter(csv_file, fieldnames=sample_data.keys(), lineterminator="\n")
writer.writeheader()
# We want the labels and required rows to appear as the second and third rows so
# add them before we add the sample data.
writer.writerow(field_labels)
writer.writerow(required)
writer.writerow(sample_data)
cardinality = collections.OrderedDict()
cardinality['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'NUMBER OF VALUES ALLOWED (REMOVE THIS ROW)'
cardinality[config['id_field']] = '1'
cardinality['file'] = '1'
cardinality['uid'] = '1'
cardinality['langcode'] = '1'
cardinality['created'] = '1'
cardinality['title'] = '1'
for field_name in field_definitions:
if field_definitions[field_name]['cardinality'] == -1:
cardinality[field_name] = 'unlimited'
else:
cardinality[field_name] = field_definitions[field_name]['cardinality']
writer.writerow(cardinality)
docs = dict()
docs['string'] = 'Single-valued fields'
docs['string_long'] = 'Single-valued fields'
docs['text'] = 'Single-valued fields'
docs['text_long'] = 'Single-valued fields'
docs['geolocation'] = 'Geolocation fields'
docs['entity_reference'] = 'Taxonomy reference fields'
docs['edtf'] = 'EDTF fields'
docs['typed_relation'] = 'Typed Relation fields'
docs['integer'] = 'Single-valued fields'
docs_tips = collections.OrderedDict()
docs_tips['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'SECTION IN DOCUMENTATION (REMOVE THIS ROW)'
docs_tips[config['id_field']] = 'Required fields'
docs_tips['file'] = 'Required fields'
docs_tips['uid'] = 'Base fields'
docs_tips['langcode'] = 'Base fields'
docs_tips['created'] = 'Base fields'
docs_tips['title'] = 'Base fields'
for field_name in field_definitions:
if field_definitions[field_name]['field_type'] in docs:
doc_reference = docs[field_definitions[field_name]['field_type']]
docs_tips[field_name] = doc_reference
else:
docs_tips[field_name] = ''
docs_tips['field_member_of'] = ''
writer.writerow(docs_tips)
csv_file.close()
print('CSV template saved at ' + csv_file_path + '.')
sys.exit()
def get_percentage(part, whole):
return 100 * float(part) / float(whole)
``` |
{
"source": "jordanemedlock/psychtruths",
"score": 2
} |
#### File: jordanemedlock/psychtruths/app.py
```python
import tornado.ioloop
import tornado.web
import time
from temboo.Library.Tumblr.Post import RetrievePublishedPosts
from temboo.core.session import TembooSession
from tornado.escape import json_decode, json_encode
session = TembooSession("jordanemedlock", "PsychTruths", "SFiEEZ8RCnzX8U8nSAbsM0QII3gcEZl6")
choreo = RetrievePublishedPosts(session)
def formatTime(t):
t = time.gmtime(t)
return time.strftime('%A, %B %e, %Y',t)
def readJSON(fileName):
f = open(fileName)
return json_decode(f.read())
def writeJSON(fileName, json):
f = open(fileName, 'w')
f.write(json_encode(json))
def getTags(update=True):
if update:
posts, num_posts = getPosts(0)
high = num_posts/10+1
print high
for page in xrange(1,high):
posts2, _ = getPosts(page)
posts += posts2
tags = dict()
for post in posts:
for tag in post['tags']:
if tag in tags:
tags[tag] += 1
else:
tags[tag] = 1
return posts, tags
else:
return readJSON('tags.json')
def matches(query):
def inner(post):
def exists(name, d):
return name in d and d[name] is not None
tagMatches = query in post['tags']
titleMatches = exists('title',post) and query.lower() in post['title'].lower()
bodyMatches = exists('body', post) and query.lower() in post['body'].lower()
return tagMatches or titleMatches or bodyMatches
return inner
def getPosts(page, update=True, tag=None, query=None):
if update:
choreo_inputs = choreo.new_input_set()
choreo_inputs.set_APIKey("u06kxugAWrGDtTxfU5IikKXg8oXyfFO8JqabS5HDyq3qPYquHH")
choreo_inputs.set_BaseHostname("psychologytruths.tumblr.com")
choreo_inputs.set_Limit("10")
choreo_inputs.set_Offset(str(page * 10))
results = choreo.execute_with_results(choreo_inputs)
results = json_decode(results.get_Response())
return results['response']['posts'], results['response']['total_posts']
else:
posts = readJSON("posts.json")
if tag is not None:
posts = filter(lambda x: tag in x['tags'], posts)
if query is not None:
posts = filter(matches(query), posts)
return posts, len(posts)
class BlogHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self, page):
base_url="/%s"
page = int(page or 0)
blog_posts, num_posts = getPosts(page, update=False)
blog_posts = blog_posts[page*10:(page+1)*10]
has_next = (page+1)*10 <= num_posts
has_prev = page > 0
tags = getTags(update=False)
tags = sorted(tags.iteritems(), cmp=lambda x,y:cmp(x[1],y[1]), reverse=True)
self.render("blog.html",
blog_posts=blog_posts,
has_prev=has_prev,
has_next=has_next,
page=page,
tags=tags,
this_tag=None,
query=None,
base_url=base_url,
formatTime=formatTime)
class TagHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self, tag, page):
base_url = "/tag/"+tag+"/%s"
page = int(page or 0)
blog_posts, num_posts = getPosts(page, update=False, tag=tag)
blog_posts = blog_posts[page*10:(page+1)*10]
has_next = (page+1)*10 <= num_posts
has_prev = page > 0
tags = getTags(update=False)
tags = sorted(tags.iteritems(), cmp=lambda x,y:cmp(x[1],y[1]), reverse=True)
self.render("blog.html",
blog_posts=blog_posts,
has_prev=has_prev,
has_next=has_next,
page=page,
tags=tags,
this_tag=tag,
query=None,
base_url=base_url,
formatTime=formatTime)
class SearchHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self, page):
base_url = "/search/%s"
query=self.get_argument('query',default='')
page = int(page or 0)
blog_posts, num_posts = getPosts(page, update=False, query=query)
blog_posts = blog_posts[page*10:(page+1)*10]
has_next = (page+1)*10 <= num_posts
has_prev = page > 0
tags = getTags(update=False)
tags = sorted(tags.iteritems(), cmp=lambda x,y:cmp(x[1],y[1]), reverse=True)
self.render("blog.html",
blog_posts=blog_posts,
has_prev=has_prev,
has_next=has_next,
page=page,
tags=tags,
this_tag=None,
query=query,
base_url=base_url,
formatTime=formatTime)
class UpdateHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
posts, tags = getTags()
writeJSON("posts.json", posts)
writeJSON("tags.json", tags)
print "all done"
self.redirect('/')
class ContactHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
self.render("contact.html")
settings = {
"template_path": "templates",
"autoreload": True
}
application = tornado.web.Application([
(r"/update/?", UpdateHandler),
(r"/contact/?", ContactHandler),
(r"/([0-9]*)", BlogHandler),
(r"/tag/([^/]*)/?([0-9]*)", TagHandler),
(r"/search/?([0-9]*)", SearchHandler),
(r"/static/(.*)", tornado.web.StaticFileHandler, {"path":"static"})
], **settings)
if __name__ == "__main__":
application.listen(7000)
tornado.ioloop.IOLoop.current().start()
```
#### File: temboo/core/choreography.py
```python
import datetime
import json
import pprint
from temboo.core.resource import _TembooResource
from temboo.core.util import ExecutionStatus
from temboo.core.session import TembooSession
class Choreography(_TembooResource):
resource_path = '/choreos'
def __init__(self, temboo_session, temboo_path):
""" Create a Choreography instance.
temboo_session -- an instance of TembooSession.
temboo_path -- a string (or an object that can be converted
into a string) that represents the location
of this choreo on the Temboo server. For example
/Choreos/MyStore/RunReport
"""
super(Choreography, self).__init__(temboo_session, temboo_path)
# For proxied executions from the JS SDK
self._jsClientVersion = None
def execute_with_results(self, choreo_inputs=None):
"""Runs the choreography and waits for it to complete.
This method will run this choreography with the supplied
inputs, block while waiting for it to complete,
and return the results as a ResultSet wrapping the
'output' and 'execution' data.
choreo_inputs -- an optional instance of InputSet (default = None)
Returns a ResultSet instance.
"""
choreo_inputs = choreo_inputs if choreo_inputs else InputSet()
body = choreo_inputs.format_inputs()
# Append JS client version string if present
params = {"source_id": TembooSession.SOURCE_ID + (('-' + self._jsClientVersion) if self._jsClientVersion else '')}
return self._make_result_set(self._temboo_session.post(self.get_session_path(), body, params), self._temboo_path)
def _make_result_set(self, result, path):
return ResultSet(result, path)
def execute_async(self, choreo_inputs=None, store_results=False):
"""Runs the choreography asynchronously, without waiting for results.
This method will run this choreography with the supplied
inputs. It does not wait for the choreography to complete.
choreo_inputs -- an optional instance of InputSet (default = None)
store_results -- a boolean that determines whether choreo results
are saved for later retrieval or discarded immediately
on choreo completion. (default = False)
Returns a ChoreographyExecution instance that can be used to poll
for status or get the results when the choreography is complete
(if store_results is True)
"""
choreo_inputs = choreo_inputs if choreo_inputs else InputSet()
body = choreo_inputs.format_inputs()
params = {'mode': 'async', 'store_results':bool(store_results),
"source_id":TembooSession.SOURCE_ID}
result = self._temboo_session.post(self.get_session_path(), body, params)
exec_id = result.get('id', None)
if exec_id:
return self._make_execution(self._temboo_session, exec_id, self._temboo_path)
return None
def _make_execution(self, session, exec_id, path):
return ChoreographyExecution(session, exec_id, path)
def _get_resource_path(self):
return self.resource_path
def _set_js_client_version(self, jsClientVersion):
"""Used to specify the version of Temboo JS SDK used for a proxied choreo execution
jsClientVersion -- the client version string
"""
self._jsClientVersion = jsClientVersion
class InputSet(object):
def __init__(self):
self.inputs = {}
self.preset_uri = None
self.outputs = []
def _set_input(self, name, value):
"""Adds (or replaces) an input variable value in the InputSet
name -- the name of the input variable.
value -- the value of the input variable. If not already a string,
will be converted to a string before sending to the server.
"""
self.inputs[name] = value
def _set_inputs(self, inputs):
"""Adds (or replaces) the names and values passed in to this InputSet
inputs -- can be a dictionary of name/value pairs
or an iterable of key/value pairs as a
tuple or other iterable of length two.
"""
self.inputs.update(inputs)
def add_output_filter(self, filterName, path, outputVariableName):
"""
Add an output filter to this result set.
"""
self.outputs.append((filterName, path, outputVariableName))
def set_profile(self, name):
"""Adds (or replaces) the name of the credential to be used as an input
to the Choreo execution
"""
self.preset_uri = name
set_credential = set_profile # Support legacy method name
def format_inputs(self):
"""Formats the JSON body of a choreography execution POST request.
"""
        all_inputs = {}
if self.inputs:
all_inputs['inputs'] = [{'name':name, 'value':self.inputs[name]} for name in self.inputs]
if self.outputs:
all_inputs['outputFilters'] = [
{'name':name, 'path':path, 'variable':varname}
for name, path, varname in self.outputs
]
if self.preset_uri:
all_inputs['preset'] = str(self.preset_uri)
return json.dumps(all_inputs)
class ResultSet(object):
def __init__(self, result, path=None):
"""
Makes a result set from the JSON result returned
from a choreo execution.
result -- may be either a dictionary containing choreo execution
results or another ResultSet instance. Giving another
ResultSet instance is useful for converting a generic
ResultSet returned by ChoreographyExecution.get_results
into a choreo-specific result set.
path -- the temboo path of the choreo that generated these results.
(ignored if result is a ResultSet)
"""
if isinstance(result, ResultSet):
self._result = result._result
self._path = result._path
else:
self._result = result
self._path = path
self._exec_data = self._result.get("execution", {})
self._output = self._result.get("output", {})
@property
def path(self):
return self._path
@property
def exec_id(self):
return self._exec_data.get('id', None)
@property
def status(self):
return self._exec_data.get('status', ExecutionStatus.ERROR)
@property
def start_time(self):
return self._exec_data.get('starttime', None)
@property
def start_time_UTC(self):
return self._time_to_UTC(self.start_time)
@property
def end_time(self):
return self._exec_data.get('endtime', None)
@property
def end_time_UTC(self):
return self._time_to_UTC(self.end_time)
@property
def error_time(self):
return self._exec_data.get('errortime', None)
@property
def error_time_UTC(self):
return self._time_to_UTC(self.error_time)
@property
def last_error(self):
return self._exec_data.get('lasterror', None)
@property
def outputs(self):
return self._output
def _time_to_UTC(self, millis):
if millis:
#Server gives us time in milliseconds.
#We need that as a floating point value in seconds.
t = float(millis)/1000.0
return datetime.datetime.utcfromtimestamp(t)
return None
def __str__(self):
msg = []
msg.append("Choreo Execution Results")
msg.append("Path: " + str(self.path))
msg.append("Execution ID: " + str(self.exec_id))
msg.append("Status: " + str(self.status))
msg.append("Start Time: " + str(self.start_time_UTC) + " UTC")
msg.append("End Time: " + str(self.end_time_UTC) + " UTC")
msg.append("Error Time: " + str(self.error_time_UTC) + " UTC")
msg.append("Last Error: " + str(self.last_error))
msg.append("Outputs:")
msg.append(pprint.pformat(self._output, width=1))
return "\n".join(msg)
class ChoreographyExecution(_TembooResource):
resource_path = "/choreo-executions"
def __init__(self, temboo_session, exec_id, choreo_uri=None):
""" Create a ChoreographyExecution instance.
ChoreographyExecution objects are normally created and
returned by Choreography.execute_async.
temboo_session -- an instance of TembooSession.
exec_id -- the execution id of the executing choreo
"""
super(ChoreographyExecution, self).__init__(temboo_session, exec_id)
self._result_set = None
self._status = None
self.choreo_uri = choreo_uri
self.exec_id = exec_id
@property
def status(self):
if not self._status or self._status == ExecutionStatus.RUNNING:
response = self._temboo_session.get_content(self.get_session_path())
if response:
exec_info = response['execution']
self._status = exec_info['status']
return self._status
def _get_resource_path(self):
return self.resource_path
@property
def result_set(self):
"""
Return result set, if it has been populated yet.
"""
if self.status != ExecutionStatus.RUNNING and self._result_set is None:
response = self._temboo_session.get_content(self.get_session_path(), {'view':'outputs'})
self._result_set = self._make_result_set(response, self._temboo_path)
return self._result_set
def _make_result_set(self, response, path):
return ResultSet(response, path)
def __str__(self):
msg = []
msg.append("Choreo Execution")
msg.append("Path: " + str(self.choreo_uri))
msg.append("Execution ID: " + str(self.exec_id))
msg.append("Status: " + str(self.status))
return "\n".join(msg)
```
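A short sketch of the asynchronous flow these classes support, assuming `choreo` is a concrete Choreography subclass instance and `inputs` its matching InputSet (both names are placeholders):

```python
# Hedged usage sketch: start an async execution, poll until it finishes,
# then read the stored results. `choreo` and `inputs` are placeholders.
import time
from temboo.core.util import ExecutionStatus

execution = choreo.execute_async(inputs, store_results=True)
while execution.status == ExecutionStatus.RUNNING:
    time.sleep(1)  # status is re-fetched from the server while still RUNNING
result_set = execution.result_set  # populated once the execution completes
print(result_set.outputs)
```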
#### File: LittleSis/Relationship/GetOneRelationship.py
```python
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetOneRelationship(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetOneRelationship Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetOneRelationship, self).__init__(temboo_session, '/Library/LittleSis/Relationship/GetOneRelationship')
def new_input_set(self):
return GetOneRelationshipInputSet()
def _make_result_set(self, result, path):
return GetOneRelationshipResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetOneRelationshipChoreographyExecution(session, exec_id, path)
class GetOneRelationshipInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetOneRelationship
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from LittleSis.org.)
"""
super(GetOneRelationshipInputSet, self)._set_input('APIKey', value)
def set_EntityIDs(self, value):
"""
Set the value of the EntityIDs input for this Choreo. ((required, string) The IDs of the entities between which you want to find relationships. Format is a semicolon delimited string (e.g. 1026;1))
"""
super(GetOneRelationshipInputSet, self)._set_input('EntityIDs', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Format of the response returned by LittleSis.org. Acceptable inputs: xml or json. Defaults to xml)
"""
super(GetOneRelationshipInputSet, self)._set_input('ResponseFormat', value)
class GetOneRelationshipResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetOneRelationship Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from LittleSis.org.)
"""
return self._output.get('Response', None)
class GetOneRelationshipChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetOneRelationshipResultSet(response, path)
```
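For orientation, a minimal sketch of invoking this Choreo, assuming the usual TembooSession(account, app_key_name, app_key_value) constructor and placeholder credentials:

```python
# Minimal sketch; every credential and input value below is a placeholder.
from temboo.core.session import TembooSession

session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
choreo = GetOneRelationship(session)
inputs = choreo.new_input_set()
inputs.set_APIKey("LITTLESIS_API_KEY")
inputs.set_EntityIDs("1026;1")
inputs.set_ResponseFormat("json")
results = choreo.execute_with_results(inputs)
print(results.getJSONFromString(results.get_Response()))
```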
{
"source": "jordaneremieff/aeroplane",
"score": 2
} |
#### File: aeroplane/aeroplane/models.py
```python
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.text import slugify
class Page(models.Model):
title = models.CharField(
verbose_name=_("title"),
max_length=255,
help_text=_("The unique title of the page displayed to the public."),
)
slug = models.SlugField(
verbose_name=_("slug"),
allow_unicode=True,
max_length=255,
help_text=_("The unique slug identifier used in URL addresses."),
)
content = models.TextField()
revisions = models.JSONField(default=dict, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
db_table = "pages"
verbose_name = "Page"
verbose_name_plural = "Pages"
constraints = [
models.UniqueConstraint(fields=["slug"], name="unique_slug"),
models.UniqueConstraint(fields=["title"], name="unique_title"),
]
indexes = [
models.Index(fields=["-created_at"]),
models.Index(fields=["-updated_at"]),
models.Index(fields=["slug"]),
]
def __str__(self) -> str:
return self.title
def save(self, *args, **kwargs) -> None:
if not self.slug:
self.slug = slugify(self.title, allow_unicode=True)
super().save(*args, **kwargs)
```
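A small illustration of the save() behaviour above, assuming a configured Django project with migrations applied (values are illustrative):

```python
# Saving a Page without an explicit slug derives one from the title
# via slugify(..., allow_unicode=True).
page = Page(title="Hello Wörld", content="...")
page.save()
assert page.slug == "hello-wörld"
```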
{
"source": "jordaneremieff/headless-api",
"score": 3
} |
#### File: src/app/models.py
```python
from typing import Optional
from pydantic import BaseModel, Field, validator
class Screenshot(BaseModel):
url: str = Field(
description="The URL of the page to screenshot", example="https://google.com"
)
window_width: int = Field(
default=1240,
description="Width of the headless browser window (in px)",
example=1240,
lt=9000,
)
window_height: int = Field(
default=1854,
description="Height of the headless browser window (in px)",
example=1854,
lt=9000,
)
export_type: Optional[str] = Field(
default="png",
description="Extension of the exported file (png, pdf)",
example="png",
)
@validator("export_type")
def validate_export_type(cls, v):
assert v in ("png", "pdf"), "Only PNG and PDF exports are supported"
return v
```
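The validator can be exercised directly; a brief sketch, assuming pydantic v1 semantics (matching the decorator used above):

```python
# Defaults apply when only the URL is given; an unsupported export_type
# raises a ValidationError through the validator above.
from pydantic import ValidationError

shot = Screenshot(url="https://google.com")
print(shot.window_width, shot.window_height, shot.export_type)  # 1240 1854 png

try:
    Screenshot(url="https://google.com", export_type="jpeg")
except ValidationError as exc:
    print(exc)  # Only PNG and PDF exports are supported
```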
{
"source": "jordaneremieff/serverless-mangum-examples",
"score": 2
} |
#### File: django-example/app/views.py
```python
from pprint import pformat
from django.views.generic import TemplateView
class HelloWorldView(TemplateView):
template_name = "helloworld.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["scope"] = pformat(self.request.scope)
return context
```
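A hypothetical URL configuration wiring up the view (the import path is an assumption based on the file layout):

```python
# Hypothetical urls.py for the example app.
from django.urls import path

from app.views import HelloWorldView

urlpatterns = [
    path("", HelloWorldView.as_view(), name="helloworld"),
]
```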
{
"source": "jordaneremieff/starlette-graphene-tortoise",
"score": 2
} |
#### File: starlette-graphene-tortoise/myapp/app.py
```python
from tortoise import Tortoise
from graphql.execution.executors.asyncio import AsyncioExecutor
from starlette.applications import Starlette
from starlette.graphql import GraphQLApp
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings
from myapp.schema import schema
config = Config(".env")
DEBUG = config("DEBUG", cast=bool, default=False)
DATABASE_URL = config("DATABASE_URL", cast=str)
APP_MODELS = config("APP_MODELS", cast=CommaSeparatedStrings)
app = Starlette(debug=DEBUG)
app.add_route("/graphql", GraphQLApp(schema=schema, executor=AsyncioExecutor()))
@app.on_event("startup")
async def on_startup() -> None:
await Tortoise.init(db_url=DATABASE_URL, modules={"models": APP_MODELS})
@app.on_event("shutdown")
async def on_shutdown() -> None:
await Tortoise.close_connections()
```
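To serve the application locally one could use uvicorn; a sketch, assuming uvicorn is installed and a .env file supplies the settings read above (for example DATABASE_URL=sqlite://db.sqlite3 and APP_MODELS=myapp.models):

```python
# Sketch: run the ASGI app defined in myapp/app.py with uvicorn.
import uvicorn

from myapp.app import app

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)
```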
{
"source": "jordaneremieff/sublime_djhtml",
"score": 2
} |
#### File: jordaneremieff/sublime_djhtml/sublime_djhtml.py
```python
import os
import sys
import logging
import sublime
import sublime_plugin
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from vendor.djhtml.__main__ import verify_changed
from vendor.djhtml.modes import DjHTML
__version__ = "0.1.0"
__version_info__ = (0, 1, 0)
logger = logging.getLogger("DjHTML")
SUBLIME_SETTINGS = "sublime_djhtml.sublime-settings"
def validate_and_indent(source):
settings = sublime.load_settings(SUBLIME_SETTINGS)
tabwidth = settings.get("tabwidth", 4)
formatted = DjHTML(source).indent(tabwidth)
if not verify_changed(source, formatted):
return None
return formatted
def check_indent_on_save(view):
settings = sublime.load_settings(SUBLIME_SETTINGS)
if settings.get("indent_on_save") and view.settings().get("syntax") in settings.get(
"enabled_syntax", []
):
view.run_command("djhtml_indent")
class DjhtmlIndentCommand(sublime_plugin.TextCommand):
def run(self, view):
region = sublime.Region(0, self.view.size())
source = self.view.substr(region)
error = None
try:
formatted = validate_and_indent(source)
except Exception:
error = (
"DjHTML: An unknown error occured, the template could not be processed."
)
logger.exception(error)
if error:
sublime.error_message(error)
elif not formatted:
sublime.status_message(
"No indentation required, template file is unchanged."
)
else:
sublime.status_message("Template has been reindented.")
self.view.replace(view, region, formatted)
class DjhtmlIndentOnSaveListener(sublime_plugin.EventListener):
def on_pre_save(self, view):
check_indent_on_save(view)
```
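Outside of Sublime, the vendored indenter used by validate_and_indent can be exercised on its own; a small sketch (the import path matches the one used in sublime_djhtml.py above, the template string is illustrative):

```python
# Standalone sketch of the vendored DjHTML indenter.
from vendor.djhtml.modes import DjHTML

source = "<ul>\n{% for item in items %}\n<li>{{ item }}</li>\n{% endfor %}\n</ul>\n"
print(DjHTML(source).indent(4))
```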
#### File: vendor/djhtml/lines.py
```python
class Line:
"""
A single output line including the final newline.
"""
def __init__(self, nr=1):
self.nr = nr
self.tokens = []
self.level = 0
def append(self, token):
"""
Append tokens to the line.
"""
token.line_nr = self.nr
self.tokens.append(token)
@property
def text(self):
"""
The unindented text of this line without leading/trailing spaces.
"""
return "".join([str(token) for token in self.tokens]).strip()
def indent(self, tabwidth):
"""
The final, indented text of this line. Make sure to set the level
and optionally offset before calling this method.
"""
if self.tokens:
if self.tokens[0].ignore:
return "".join([str(token) for token in self.tokens]) + "\n"
elif self.text:
offset = self.tokens[0].offset * tabwidth
spaces = tabwidth * self.level + offset
return " " * spaces + self.text + "\n"
return "\n"
def __repr__(self):
return repr(self.tokens)
def __bool__(self):
return bool(self.tokens and self.text)
def __next__(self):
return Line(nr=self.nr + 1)
```
#### File: vendor/djhtml/__main__.py
```python
import argparse
import sys
from . import modes
def verify_changed(source, result):
"""
Verify that the source is either exactly equal to the result or
that the result has only changed by added or removed whitespace.
"""
output_lines = result.split("\n")
changed = False
for line_nr, line in enumerate(source.split("\n")):
if line != output_lines[line_nr]:
changed = True
if line.strip() != output_lines[line_nr].strip():
raise IndentationError("Non-whitespace changes detected. Core dumped.")
return changed
def main():
"""
Entrypoint for all 4 command-line tools. Typical usage:
$ djhtml -i file1.html file2.html
"""
Mode = modes.DjHTML
if sys.argv[0].endswith("djtxt"):
Mode = modes.DjTXT
if sys.argv[0].endswith("djcss"):
Mode = modes.DjCSS
if sys.argv[0].endswith("djjs"):
Mode = modes.DjJS
changed_files = 0
unchanged_files = 0
problematic_files = 0
parser = argparse.ArgumentParser(
description=(
"DjHTML is a fully automatic template indenter that works with mixed"
" HTML/CSS/Javascript templates that contain Django or Jinja template"
" tags. It works similar to other code-formatting tools such as Black and"
" interoperates nicely with pre-commit. Full documentation can be found at"
" https://github.com/rtts/djhtml"
),
)
parser.add_argument(
"-i", "--in-place", action="store_true", help="modify files in-place"
)
parser.add_argument("-c", "--check", action="store_true", help="don't modify files")
parser.add_argument("-q", "--quiet", action="store_true", help="be quiet")
parser.add_argument(
"-t",
"--tabwidth",
metavar="N",
type=int,
default=4,
help="tabwidth (default is 4)",
)
parser.add_argument(
"-o",
"--output-file",
metavar="filename",
default="-",
help="output filename",
)
parser.add_argument(
"input_filenames",
metavar="filenames",
nargs="*",
default=["-"],
help="input filenames",
)
parser.add_argument("-d", "--debug", action="store_true", help=argparse.SUPPRESS)
args = parser.parse_args()
if args.in_place and "-" in args.input_filenames:
sys.exit("I’m sorry Dave, I’m afraid I can’t do that")
if len(args.input_filenames) > 1 and not args.in_place and not args.check:
sys.exit("Will not modify files in-place without -i option")
for input_filename in args.input_filenames:
# Read input file
try:
input_file = (
sys.stdin if input_filename == "-" else open(input_filename, "r")
)
source = input_file.read()
except Exception as e:
problematic_files += 1
if not args.quiet:
print(f"Error opening {input_filename}: {e}", file=sys.stderr)
continue
# Indent input file
try:
if args.debug:
print(Mode(source).debug())
sys.exit()
result = Mode(source).indent(args.tabwidth)
except SyntaxError as e:
problematic_files += 1
if not args.quiet:
print(
f"Syntax error in {input_file.name}:"
f" {str(e) or e.__class__.__name__}",
file=sys.stderr,
)
continue
except Exception:
print(
f"\nFatal error while processing {input_file.name}\n\n"
" If you have time and are using the latest version, we\n"
" would very much appreciate if you opened an issue on\n"
" https://github.com/rtts/djhtml/issues\n",
file=sys.stderr,
)
raise
finally:
input_file.close()
changed = verify_changed(source, result)
# Print to stdout and exit
if not args.in_place and not args.check and args.output_file == "-":
if not args.quiet:
print(result, end="")
sys.exit(1 if args.check and changed else 0)
# Write output file
if changed and args.check:
changed_files += 1
elif changed:
output_filename = input_file.name if args.in_place else args.output_file
try:
output_file = open(output_filename, "w")
output_file.write(result)
output_file.close()
changed_files += 1
except Exception as e:
problematic_files += 1
if not args.quiet:
print(f"Error writing {output_filename}: {e}", file=sys.stderr)
continue
if not args.quiet:
print(
f"reindented {output_file.name}",
file=sys.stderr,
)
else:
unchanged_files += 1
# Print final summary
if not args.quiet:
s = "s" if changed_files != 1 else ""
have = "would have" if args.check else "have" if s else "has"
print(
f"{changed_files} template{s} {have} been reindented.",
file=sys.stderr,
)
if unchanged_files:
s = "s" if unchanged_files != 1 else ""
were = "were" if s else "was"
print(
f"{unchanged_files} template{s} {were} already perfect!",
file=sys.stderr,
)
if problematic_files:
s = "s" if problematic_files != 1 else ""
print(
f"{problematic_files} template{s} could not be processed due to an"
" error.",
file=sys.stderr,
)
sys.exit(changed_files if args.check else problematic_files)
if __name__ == "__main__":
main()
```
#### File: vendor/djhtml/tokens.py
```python
class Token:
"""
Container class for token types.
"""
class _Base:
indents = False
dedents = False
ignore = False
def __init__(self, text, kind="", offset=0):
self.text = text
self.kind = kind
self.offset = offset
def __str__(self):
return self.text
def __repr__(self):
return f"({self.__class__.__name__}:{repr(self.text)})"
class Text(_Base):
pass
class Ignore(_Base):
ignore = True
class Open(_Base):
indents = True
class Close(_Base):
dedents = True
class OpenAndClose(_Base):
indents = True
dedents = True
```
{
"source": "jordan-evans/aws-service-catalog-puppet",
"score": 2
} |
#### File: template_builder/hub/bootstrap.py
```python
import troposphere as t
import yaml
from awacs import iam as awscs_iam
from troposphere import codebuild
from troposphere import codecommit
from troposphere import codepipeline
from troposphere import iam
from troposphere import s3
from troposphere import sns
from troposphere import sqs
from troposphere import ssm
from servicecatalog_puppet import constants
def get_template(
puppet_version,
all_regions,
source,
is_caching_enabled,
is_manual_approvals: bool,
scm_skip_creation_of_repo: bool,
should_validate: bool,
) -> t.Template:
is_codecommit = source.get("Provider", "").lower() == "codecommit"
is_github = source.get("Provider", "").lower() == "github"
is_codestarsourceconnection = (
source.get("Provider", "").lower() == "codestarsourceconnection"
)
is_custom = source.get("Provider", "").lower() == "custom"
is_s3 = source.get("Provider", "").lower() == "s3"
description = f"""Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies
{{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}"""
template = t.Template(Description=description)
version_parameter = template.add_parameter(
t.Parameter("Version", Default=puppet_version, Type="String")
)
org_iam_role_arn_parameter = template.add_parameter(
t.Parameter("OrgIamRoleArn", Default="None", Type="String")
)
with_manual_approvals_parameter = template.add_parameter(
t.Parameter(
"WithManualApprovals",
Type="String",
AllowedValues=["Yes", "No"],
Default="No",
)
)
puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetCodePipelineRolePermissionBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetCodePipelineRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
source_role_permissions_boundary_parameter = template.add_parameter(
t.Parameter(
"SourceRolePermissionsBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the SourceRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
puppet_generate_role_permission_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetGenerateRolePermissionBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetGenerateRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
puppet_deploy_role_permission_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetDeployRolePermissionBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetDeployRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetProvisioningRolePermissionsBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetProvisioningRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter(
t.Parameter(
"CloudFormationDeployRolePermissionsBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the CloudFormationDeployRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
deploy_environment_compute_type_parameter = template.add_parameter(
t.Parameter(
"DeployEnvironmentComputeType",
Type="String",
Description="The AWS CodeBuild Environment Compute Type",
Default="BUILD_GENERAL1_SMALL",
)
)
spoke_deploy_environment_compute_type_parameter = template.add_parameter(
t.Parameter(
"SpokeDeployEnvironmentComputeType",
Type="String",
Description="The AWS CodeBuild Environment Compute Type for spoke execution mode",
Default="BUILD_GENERAL1_SMALL",
)
)
deploy_num_workers_parameter = template.add_parameter(
t.Parameter(
"DeployNumWorkers",
Type="Number",
Description="Number of workers that should be used when running a deploy",
Default=10,
)
)
puppet_role_name_parameter = template.add_parameter(
t.Parameter("PuppetRoleName", Type="String", Default="PuppetRole")
)
puppet_role_path_template_parameter = template.add_parameter(
t.Parameter("PuppetRolePath", Type="String", Default="/servicecatalog-puppet/")
)
template.add_condition(
"ShouldUseOrgs", t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), "None"))
)
template.add_condition(
"HasManualApprovals", t.Equals(t.Ref(with_manual_approvals_parameter), "Yes")
)
template.add_resource(
s3.Bucket(
"StacksRepository",
BucketName=t.Sub("sc-puppet-stacks-repository-${AWS::AccountId}"),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
BlockPublicAcls=True,
BlockPublicPolicy=True,
IgnorePublicAcls=True,
RestrictPublicBuckets=True,
),
Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}),
)
)
manual_approvals_param = template.add_resource(
ssm.Parameter(
"ManualApprovalsParam",
Type="String",
Name="/servicecatalog-puppet/manual-approvals",
Value=t.Ref(with_manual_approvals_parameter),
)
)
template.add_resource(
ssm.Parameter(
"SpokeDeployEnvParameter",
Type="String",
Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME,
Value=t.Ref(spoke_deploy_environment_compute_type_parameter),
)
)
param = template.add_resource(
ssm.Parameter(
"Param",
Type="String",
Name="service-catalog-puppet-version",
Value=t.Ref(version_parameter),
)
)
partition_parameter = template.add_resource(
ssm.Parameter(
"PartitionParameter",
Type="String",
Name="/servicecatalog-puppet/partition",
Value=t.Ref("AWS::Partition"),
)
)
puppet_role_name_parameter = template.add_resource(
ssm.Parameter(
"PuppetRoleNameParameter",
Type="String",
Name="/servicecatalog-puppet/puppet-role/name",
Value=t.Ref(puppet_role_name_parameter),
)
)
puppet_role_path_parameter = template.add_resource(
ssm.Parameter(
"PuppetRolePathParameter",
Type="String",
Name="/servicecatalog-puppet/puppet-role/path",
Value=t.Ref(puppet_role_path_template_parameter),
)
)
share_accept_function_role = template.add_resource(
iam.Role(
"ShareAcceptFunctionRole",
RoleName="ShareAcceptFunctionRole",
ManagedPolicyArns=[
t.Sub(
"arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
)
],
Path=t.Ref(puppet_role_path_template_parameter),
Policies=[
iam.Policy(
PolicyName="ServiceCatalogActions",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Resource": {
"Fn::Sub": "arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}"
},
"Effect": "Allow",
}
],
},
)
],
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["lambda.amazonaws.com"]},
}
],
},
)
)
provisioning_role = template.add_resource(
iam.Role(
"ProvisioningRole",
RoleName="PuppetProvisioningRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codebuild.amazonaws.com"]},
},
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"AWS": {"Fn::Sub": "${AWS::AccountId}"}},
},
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(
puppet_provisioning_role_permissions_boundary_parameter
),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
cloud_formation_deploy_role = template.add_resource(
iam.Role(
"CloudFormationDeployRole",
RoleName="CloudFormationDeployRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["cloudformation.amazonaws.com"]},
},
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"AWS": {"Fn::Sub": "${AWS::AccountId}"}},
},
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(
cloud_formation_deploy_role_permissions_boundary_parameter
),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
pipeline_role = template.add_resource(
iam.Role(
"PipelineRole",
RoleName="PuppetCodePipelineRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codepipeline.amazonaws.com"]},
}
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(
puppet_code_pipeline_role_permission_boundary_parameter
),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
source_role = template.add_resource(
iam.Role(
"SourceRole",
RoleName="PuppetSourceRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codepipeline.amazonaws.com"]},
},
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"AWS": {
"Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:root"
}
},
},
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(source_role_permissions_boundary_parameter),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
dry_run_notification_topic = template.add_resource(
sns.Topic(
"DryRunNotificationTopic",
DisplayName="service-catalog-puppet-dry-run-approvals",
TopicName="service-catalog-puppet-dry-run-approvals",
Condition="HasManualApprovals",
)
)
deploy_role = template.add_resource(
iam.Role(
"DeployRole",
RoleName="PuppetDeployRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codebuild.amazonaws.com"]},
}
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(puppet_deploy_role_permission_boundary_parameter),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
num_workers_ssm_parameter = template.add_resource(
ssm.Parameter(
"NumWorkersSSMParameter",
Type="String",
Name="/servicecatalog-puppet/deploy/num-workers",
Value=t.Sub("${DeployNumWorkers}"),
)
)
parameterised_source_bucket = template.add_resource(
s3.Bucket(
"ParameterisedSourceBucket",
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
IgnorePublicAcls=True,
BlockPublicPolicy=True,
BlockPublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
BucketName=t.Sub("sc-puppet-parameterised-runs-${AWS::AccountId}"),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
)
)
source_stage = codepipeline.Stages(
Name="Source",
Actions=[
codepipeline.Actions(
RunOrder=1,
RoleArn=t.GetAtt("SourceRole", "Arn"),
ActionTypeId=codepipeline.ActionTypeId(
Category="Source", Owner="AWS", Version="1", Provider="S3",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="ParameterisedSource")
],
Configuration={
"S3Bucket": t.Ref(parameterised_source_bucket),
"S3ObjectKey": "parameters.zip",
"PollForSourceChanges": True,
},
Name="ParameterisedSource",
)
],
)
install_spec = {
"runtime-versions": dict(python="3.7"),
"commands": [
f"pip install {puppet_version}"
if "http" in puppet_version
else f"pip install aws-service-catalog-puppet=={puppet_version}",
],
}
deploy_env_vars = [
{
"Type": "PLAINTEXT",
"Name": "PUPPET_ACCOUNT_ID",
"Value": t.Ref("AWS::AccountId"),
},
{"Type": "PLAINTEXT", "Name": "PUPPET_REGION", "Value": t.Ref("AWS::Region"),},
{
"Type": "PARAMETER_STORE",
"Name": "PARTITION",
"Value": t.Ref(partition_parameter),
},
{
"Type": "PARAMETER_STORE",
"Name": "PUPPET_ROLE_NAME",
"Value": t.Ref(puppet_role_name_parameter),
},
{
"Type": "PARAMETER_STORE",
"Name": "PUPPET_ROLE_PATH",
"Value": t.Ref(puppet_role_path_parameter),
},
]
if is_codecommit:
template.add_resource(
codecommit.Repository(
"CodeRepo",
RepositoryName=source.get("Configuration").get("RepositoryName"),
RepositoryDescription="Repo to store the servicecatalog puppet solution",
DeletionPolicy="Retain",
)
)
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
RoleArn=t.GetAtt("SourceRole", "Arn"),
ActionTypeId=codepipeline.ActionTypeId(
Category="Source", Owner="AWS", Version="1", Provider="CodeCommit",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"RepositoryName": source.get("Configuration").get("RepositoryName"),
"BranchName": source.get("Configuration").get("BranchName"),
"PollForSourceChanges": source.get("Configuration").get(
"PollForSourceChanges", True
),
},
Name="Source",
)
)
if is_github:
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
ActionTypeId=codepipeline.ActionTypeId(
Category="Source",
Owner="ThirdParty",
Version="1",
Provider="GitHub",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"Owner": source.get("Configuration").get("Owner"),
"Repo": source.get("Configuration").get("Repo"),
"Branch": source.get("Configuration").get("Branch"),
"OAuthToken": t.Join(
"",
[
"{{resolve:secretsmanager:",
source.get("Configuration").get("SecretsManagerSecret"),
":SecretString:OAuthToken}}",
],
),
"PollForSourceChanges": source.get("Configuration").get(
"PollForSourceChanges"
),
},
Name="Source",
)
)
if is_custom:
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
ActionTypeId=codepipeline.ActionTypeId(
Category="Source",
Owner="Custom",
Version=source.get("Configuration").get("CustomActionTypeVersion"),
Provider=source.get("Configuration").get(
"CustomActionTypeProvider"
),
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"GitUrl": source.get("Configuration").get("GitUrl"),
"Branch": source.get("Configuration").get("Branch"),
"PipelineName": t.Sub("${AWS::StackName}-pipeline"),
},
Name="Source",
)
)
webhook = codepipeline.Webhook(
"Webhook",
Authentication="IP",
TargetAction="Source",
AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(
AllowedIPRange=source.get("Configuration").get("GitWebHookIpAddress")
),
Filters=[
codepipeline.WebhookFilterRule(
JsonPath="$.changes[0].ref.id", MatchEquals="refs/heads/{Branch}"
)
],
TargetPipelineVersion=1,
TargetPipeline=t.Sub("${AWS::StackName}-pipeline"),
)
template.add_resource(webhook)
values_for_sub = {
"GitUrl": source.get("Configuration").get("GitUrl"),
"WebhookUrl": t.GetAtt(webhook, "Url"),
}
output_to_add = t.Output("WebhookUrl")
output_to_add.Value = t.Sub("${GitUrl}||${WebhookUrl}", **values_for_sub)
output_to_add.Export = t.Export(t.Sub("${AWS::StackName}-pipeline"))
template.add_output(output_to_add)
if is_codestarsourceconnection:
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
RoleArn=t.GetAtt("SourceRole", "Arn"),
ActionTypeId=codepipeline.ActionTypeId(
Category="Source",
Owner="AWS",
Version="1",
Provider="CodeStarSourceConnection",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"ConnectionArn": source.get("Configuration").get("ConnectionArn"),
"FullRepositoryId": source.get("Configuration").get(
"FullRepositoryId"
),
"BranchName": source.get("Configuration").get("BranchName"),
"OutputArtifactFormat": source.get("Configuration").get(
"OutputArtifactFormat"
),
},
Name="Source",
)
)
if is_s3:
bucket_name = source.get("Configuration").get("S3Bucket")
if not scm_skip_creation_of_repo:
template.add_resource(
s3.Bucket(
bucket_name,
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
IgnorePublicAcls=True,
BlockPublicPolicy=True,
BlockPublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(
**{"ServiceCatalogPuppet:Actor": "Framework"}
),
BucketName=bucket_name,
VersioningConfiguration=s3.VersioningConfiguration(
Status="Enabled"
),
)
)
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
ActionTypeId=codepipeline.ActionTypeId(
Category="Source", Owner="AWS", Version="1", Provider="S3",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"S3Bucket": bucket_name,
"S3ObjectKey": source.get("Configuration").get("S3ObjectKey"),
"PollForSourceChanges": source.get("Configuration").get(
"PollForSourceChanges"
),
},
Name="Source",
)
)
single_account_run_project_build_spec = dict(
version=0.2,
phases=dict(
install=install_spec,
build={
"commands": [
'echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml',
"cat parameters.yaml",
"zip parameters.zip parameters.yaml",
"aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip",
]
},
post_build={
"commands": [
"servicecatalog-puppet wait-for-parameterised-run-to-complete",
]
},
),
artifacts=dict(
name="DeployProject",
files=[
"ServiceCatalogPuppet/manifest.yaml",
"ServiceCatalogPuppet/manifest-expanded.yaml",
"results/*/*",
"output/*/*",
"exploded_results/*/*",
"tasks.log",
],
),
)
single_account_run_project_args = dict(
Name="servicecatalog-puppet-single-account-run",
Description="Runs puppet for a single account - SINGLE_ACCOUNT_ID",
ServiceRole=t.GetAtt(deploy_role, "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS",),
TimeoutInMinutes=480,
Environment=codebuild.Environment(
ComputeType=t.Ref(deploy_environment_compute_type_parameter),
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{
"Type": "PLAINTEXT",
"Name": "SINGLE_ACCOUNT_ID",
"Value": "CHANGE_ME",
},
]
+ deploy_env_vars,
),
Source=codebuild.Source(
Type="NO_SOURCE",
BuildSpec=yaml.safe_dump(single_account_run_project_build_spec),
),
)
single_account_run_project = template.add_resource(
codebuild.Project("SingleAccountRunProject", **single_account_run_project_args)
)
single_account_run_project_build_spec["phases"]["post_build"]["commands"] = [
"servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL"
]
single_account_run_project_args[
"Name"
] = "servicecatalog-puppet-single-account-run-with-callback"
single_account_run_project_args[
"Description"
] = "Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put"
single_account_run_project_args.get("Environment").EnvironmentVariables.append(
{"Type": "PLAINTEXT", "Name": "CALLBACK_URL", "Value": "CHANGE_ME",}
)
single_account_run_project_args["Source"] = codebuild.Source(
Type="NO_SOURCE",
BuildSpec=yaml.safe_dump(single_account_run_project_build_spec),
)
single_account_run_project_with_callback = template.add_resource(
codebuild.Project(
"SingleAccountRunWithCallbackProject", **single_account_run_project_args
)
)
stages = [source_stage]
if should_validate:
template.add_resource(
codebuild.Project(
"ValidateProject",
Name="servicecatalog-puppet-validate",
ServiceRole=t.GetAtt("DeployRole", "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"),
TimeoutInMinutes=60,
Environment=codebuild.Environment(
ComputeType="BUILD_GENERAL1_SMALL",
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
),
Source=codebuild.Source(
BuildSpec=yaml.safe_dump(
dict(
version="0.2",
phases={
"install": {
"runtime-versions": {"python": "3.7",},
"commands": [
f"pip install {puppet_version}"
if "http" in puppet_version
else f"pip install aws-service-catalog-puppet=={puppet_version}",
],
},
"build": {
"commands": [
"servicecatalog-puppet validate manifest.yaml"
]
},
},
)
),
Type="CODEPIPELINE",
),
Description="Validate the manifest.yaml file",
)
)
stages.append(
codepipeline.Stages(
Name="Validate",
Actions=[
codepipeline.Actions(
InputArtifacts=[codepipeline.InputArtifacts(Name="Source"),],
Name="Validate",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="ValidateProject")
],
Configuration={
"ProjectName": t.Ref("ValidateProject"),
"PrimarySource": "Source",
},
RunOrder=1,
),
],
)
)
if is_manual_approvals:
deploy_stage = codepipeline.Stages(
Name="Deploy",
Actions=[
codepipeline.Actions(
InputArtifacts=[
codepipeline.InputArtifacts(Name="Source"),
codepipeline.InputArtifacts(Name="ParameterisedSource"),
],
Name="DryRun",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="DryRunProject")
],
Configuration={
"ProjectName": t.Ref("DryRunProject"),
"PrimarySource": "Source",
},
RunOrder=1,
),
codepipeline.Actions(
ActionTypeId=codepipeline.ActionTypeId(
Category="Approval",
Owner="AWS",
Version="1",
Provider="Manual",
),
Configuration={
"NotificationArn": t.Ref("DryRunNotificationTopic"),
"CustomData": "Approve when you are happy with the dry run.",
},
Name="DryRunApproval",
RunOrder=2,
),
codepipeline.Actions(
InputArtifacts=[
codepipeline.InputArtifacts(Name="Source"),
codepipeline.InputArtifacts(Name="ParameterisedSource"),
],
Name="Deploy",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="DeployProject")
],
Configuration={
"ProjectName": t.Ref("DeployProject"),
"PrimarySource": "Source",
},
RunOrder=3,
),
],
)
else:
deploy_stage = codepipeline.Stages(
Name="Deploy",
Actions=[
codepipeline.Actions(
InputArtifacts=[
codepipeline.InputArtifacts(Name="Source"),
codepipeline.InputArtifacts(Name="ParameterisedSource"),
],
Name="Deploy",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="DeployProject")
],
Configuration={
"ProjectName": t.Ref("DeployProject"),
"PrimarySource": "Source",
"EnvironmentVariables": '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]',
},
RunOrder=1,
),
],
)
stages.append(deploy_stage)
pipeline = template.add_resource(
codepipeline.Pipeline(
"Pipeline",
RoleArn=t.GetAtt("PipelineRole", "Arn"),
Stages=stages,
Name=t.Sub("${AWS::StackName}-pipeline"),
ArtifactStore=codepipeline.ArtifactStore(
Type="S3",
Location=t.Sub(
"sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}"
),
),
RestartExecutionOnUpdate=True,
)
)
if is_github:
template.add_resource(
codepipeline.Webhook(
"Webhook",
AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(
SecretToken=t.Join(
"",
[
"{{resolve:secretsmanager:",
source.get("Configuration").get("SecretsManagerSecret"),
":SecretString:SecretToken}}",
],
)
),
Filters=[
codepipeline.WebhookFilterRule(
JsonPath="$.ref",
MatchEquals="refs/heads/"
+ source.get("Configuration").get("Branch"),
)
],
Authentication="GITHUB_HMAC",
TargetPipeline=t.Ref(pipeline),
TargetAction="Source",
Name=t.Sub("${AWS::StackName}-webhook"),
TargetPipelineVersion=t.GetAtt(pipeline, "Version"),
RegisterWithThirdParty="true",
)
)
deploy_project_build_spec = dict(
version=0.2,
phases=dict(
install={
"runtime-versions": dict(python="3.7"),
"commands": [
f"pip install {puppet_version}"
if "http" in puppet_version
else f"pip install aws-service-catalog-puppet=={puppet_version}",
],
},
pre_build={
"commands": [
"servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml",
]
},
build={
"commands": [
"servicecatalog-puppet --info deploy --num-workers ${NUM_WORKERS} manifest-expanded.yaml",
]
},
),
artifacts=dict(
name="DeployProject",
files=[
"manifest-expanded.yaml",
"results/*/*",
"output/*/*",
"exploded_results/*/*",
"tasks.log",
],
),
)
deploy_project_args = dict(
Name="servicecatalog-puppet-deploy",
ServiceRole=t.GetAtt(deploy_role, "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="CODEPIPELINE",),
TimeoutInMinutes=480,
Environment=codebuild.Environment(
ComputeType=t.Ref(deploy_environment_compute_type_parameter),
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{
"Type": "PARAMETER_STORE",
"Name": "NUM_WORKERS",
"Value": t.Ref(num_workers_ssm_parameter),
},
{
"Type": "PARAMETER_STORE",
"Name": "SPOKE_EXECUTION_MODE_DEPLOY_ENV",
"Value": constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME,
},
]
+ deploy_env_vars,
),
Source=codebuild.Source(
Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec),
),
Description="deploys out the products to be deployed",
)
deploy_project = template.add_resource(
codebuild.Project("DeployProject", **deploy_project_args)
)
deploy_project_build_spec["phases"]["build"]["commands"] = [
"servicecatalog-puppet --info dry-run manifest-expanded.yaml"
]
deploy_project_build_spec["artifacts"]["name"] = "DryRunProject"
deploy_project_args["Name"] = "servicecatalog-puppet-dryrun"
deploy_project_args["Description"] = "dry run of servicecatalog-puppet-dryrun"
deploy_project_args["Source"] = codebuild.Source(
Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec),
)
dry_run_project = template.add_resource(
codebuild.Project("DryRunProject", **deploy_project_args)
)
bootstrap_project = template.add_resource(
codebuild.Project(
"BootstrapProject",
Name="servicecatalog-puppet-bootstrap-spokes-in-ou",
ServiceRole=t.GetAtt("DeployRole", "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"),
TimeoutInMinutes=60,
Environment=codebuild.Environment(
ComputeType="BUILD_GENERAL1_SMALL",
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{"Type": "PLAINTEXT", "Name": "OU_OR_PATH", "Value": "CHANGE_ME"},
{
"Type": "PLAINTEXT",
"Name": "IAM_ROLE_NAME",
"Value": "OrganizationAccountAccessRole",
},
{"Type": "PLAINTEXT", "Name": "IAM_ROLE_ARNS", "Value": ""},
],
),
Source=codebuild.Source(
BuildSpec="version: 0.2\nphases:\n install:\n runtime-versions:\n python: 3.7\n commands:\n - pip install aws-service-catalog-puppet\n build:\n commands:\n - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS\nartifacts:\n files:\n - results/*/*\n - output/*/*\n name: BootstrapProject\n",
Type="NO_SOURCE",
),
Description="Bootstrap all the accounts in an OU",
)
)
template.add_resource(
codebuild.Project(
"BootstrapASpokeProject",
Name="servicecatalog-puppet-bootstrap-spoke",
ServiceRole=t.GetAtt("DeployRole", "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"),
TimeoutInMinutes=60,
Environment=codebuild.Environment(
ComputeType="BUILD_GENERAL1_SMALL",
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{
"Type": "PLAINTEXT",
"Name": "PUPPET_ACCOUNT_ID",
"Value": t.Sub("${AWS::AccountId}"),
},
{
"Type": "PLAINTEXT",
"Name": "ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN",
"Value": "CHANGE_ME",
},
{
"Type": "PLAINTEXT",
"Name": "ASSUMABLE_ROLE_IN_ROOT_ACCOUNT",
"Value": "CHANGE_ME",
},
],
),
Source=codebuild.Source(
BuildSpec=yaml.safe_dump(
dict(
version=0.2,
phases=dict(
install=install_spec,
build={
"commands": [
"servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN}"
]
},
),
)
),
Type="NO_SOURCE",
),
Description="Bootstrap given account as a spoke",
)
)
cloud_formation_events_queue = template.add_resource(
sqs.Queue(
"CloudFormationEventsQueue",
QueueName="servicecatalog-puppet-cloudformation-events",
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
)
)
cloud_formation_events_queue_policy = template.add_resource(
sqs.QueuePolicy(
"CloudFormationEventsQueuePolicy",
Queues=[t.Ref(cloud_formation_events_queue)],
PolicyDocument={
"Id": "AllowSNS",
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-send-message",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": ["sqs:SendMessage"],
"Resource": "*",
"Condition": {
"ArnEquals": {
"aws:SourceArn": t.Sub(
"arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events"
)
}
},
}
],
},
)
)
spoke_deploy_bucket = template.add_resource(
s3.Bucket(
"SpokeDeployBucket",
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
IgnorePublicAcls=True,
BlockPublicPolicy=True,
BlockPublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
BucketName=t.Sub("sc-puppet-spoke-deploy-${AWS::AccountId}"),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
)
)
caching_bucket = template.add_resource(
s3.Bucket(
"CachingBucket",
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
BlockPublicAcls=True,
BlockPublicPolicy=True,
IgnorePublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
BucketName=t.Sub(
"sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}"
),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
)
)
template.add_output(
t.Output(
"CloudFormationEventsQueueArn",
Value=t.GetAtt(cloud_formation_events_queue, "Arn"),
)
)
template.add_output(t.Output("Version", Value=t.GetAtt(param, "Value")))
template.add_output(
t.Output(
"ManualApprovalsParam", Value=t.GetAtt(manual_approvals_param, "Value")
)
)
template.add_resource(
ssm.Parameter(
"DefaultTerraformVersion",
Type="String",
Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME,
Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE,
)
)
return template
```
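A minimal sketch of rendering this template as CloudFormation YAML; the module import path and all argument values are assumptions, and the source dict mirrors the CodeCommit branch handled above:

```python
# Sketch: build the hub bootstrap template and print it as YAML.
from servicecatalog_puppet.template_builder.hub import bootstrap

source = {
    "Provider": "CodeCommit",
    "Configuration": {
        "RepositoryName": "ServiceCatalogPuppet",
        "BranchName": "main",
        "PollForSourceChanges": True,
    },
}
template = bootstrap.get_template(
    puppet_version="0.100.0",  # placeholder version
    all_regions=["eu-west-1", "eu-west-2"],
    source=source,
    is_caching_enabled=False,
    is_manual_approvals=False,
    scm_skip_creation_of_repo=False,
    should_validate=True,
)
print(template.to_yaml())
```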
#### File: workflow/manifest/generate_manifest_with_ids_task.py
```python
import copy
import glob
import json
import os
import luigi
import yaml
import shutil
import zipfile
from servicecatalog_puppet import config, constants
from servicecatalog_puppet.workflow import tasks
from servicecatalog_puppet.workflow.manifest import manifest_mixin
from servicecatalog_puppet.workflow.portfolio.accessors import (
get_portfolio_by_portfolio_name_task,
)
from servicecatalog_puppet.workflow.portfolio.accessors import (
get_products_and_provisioning_artifacts_task,
)
from servicecatalog_puppet.workflow.general import get_ssm_param_task
class GenerateManifestWithIdsTask(tasks.PuppetTask, manifest_mixin.ManifestMixen):
puppet_account_id = luigi.Parameter()
def params_for_results_display(self):
return {
"puppet_account_id": self.puppet_account_id,
"cache_invalidator": self.cache_invalidator,
}
def requires(self):
requirements = dict()
regions = config.get_regions(self.puppet_account_id)
for launch_name, launch_details in self.manifest.get_launches_items():
portfolio = launch_details.get("portfolio")
for region in regions:
if requirements.get(region) is None:
requirements[region] = dict()
regional_details = requirements[region]
if regional_details.get(portfolio) is None:
regional_details[portfolio] = dict(products=dict())
portfolio_details = regional_details[portfolio]
if portfolio_details.get("details") is None:
portfolio_details[
"details"
] = get_portfolio_by_portfolio_name_task.GetPortfolioByPortfolioName(
manifest_file_path=self.manifest_file_path,
portfolio=portfolio,
puppet_account_id=self.puppet_account_id,
account_id=self.puppet_account_id,
region=region,
)
product = launch_details.get("product")
products = portfolio_details.get("products")
if products.get(product) is None:
products[
product
] = get_products_and_provisioning_artifacts_task.GetProductsAndProvisioningArtifactsTask(
manifest_file_path=self.manifest_file_path,
region=region,
portfolio=portfolio,
puppet_account_id=self.puppet_account_id,
)
params = dict()
parameter_by_paths = dict()
requirements["parameters"] = params
requirements["parameter_by_paths"] = parameter_by_paths
home_region = config.get_home_region(self.puppet_account_id)
for section in constants.SECTION_NAMES_THAT_SUPPORTS_PARAMETERS:
for item_name, item_details in self.manifest.get(section, {}).items():
if item_details.get("execution") == constants.EXECUTION_MODE_SPOKE:
for parameter_name, parameter_details in item_details.get(
"parameters", {}
).items():
if parameter_details.get("ssm") and str(
parameter_details.get("ssm").get("account_id", "")
) == str(self.puppet_account_id):
r = parameter_details.get("ssm").get(
"region", config.get_home_region(self.puppet_account_id)
)
name = parameter_details.get("ssm").get("name")
path = parameter_details.get("ssm").get("path", "")
if path == "":
accounts_and_regions = self.manifest.get_account_ids_and_regions_used_for_section_item(
self.puppet_account_id, section, item_name
)
for account_id, regions in accounts_and_regions.items():
for region in regions:
n = name.replace(
"${AWS::AccountId}", account_id
).replace("${AWS::Region}", region)
params[
f"{parameter_name}||{n}||{r}"
] = get_ssm_param_task.GetSSMParamTask(
parameter_name=parameter_name,
name=n,
region=r,
path=parameter_details.get("ssm").get(
"path", ""
),
recursive=parameter_details.get("ssm").get(
"recursive", True
),
depends_on=parameter_details.get("ssm").get(
"depends_on", []
),
manifest_file_path=self.manifest_file_path,
puppet_account_id=self.puppet_account_id,
spoke_account_id=self.puppet_account_id,
spoke_region=r,
)
else:
parameter_by_paths[
path
] = get_ssm_param_task.GetSSMParamByPathTask(
path=parameter_details.get("ssm").get("path", ""),
recursive=parameter_details.get("ssm").get(
"recursive", True
),
region=parameter_details.get("ssm").get(
"recursive", home_region
),
depends_on=parameter_details.get("ssm").get(
"depends_on", []
),
manifest_file_path=self.manifest_file_path,
puppet_account_id=self.puppet_account_id,
spoke_account_id=self.puppet_account_id,
spoke_region=home_region,
)
return requirements
def run(self):
self.debug("starting")
new_manifest = copy.deepcopy(self.manifest)
regions = config.get_regions(self.puppet_account_id)
global_id_cache = dict()
new_manifest["id_cache"] = global_id_cache
for region in regions:
regional_id_cache = dict()
r = self.input().get(region)
for launch_name, launch_details in self.manifest.get_launches_items():
self.debug(
f"processing launch_name={launch_name} in {region} for id_cache generation"
)
target = r.get(launch_details.get("portfolio")).get("details")
portfolio_id = json.loads(target.open("r").read()).get("portfolio_id")
portfolio_name = launch_details.get("portfolio")
if regional_id_cache.get(portfolio_name) is None:
regional_id_cache[portfolio_name] = dict(
id=portfolio_id, products=dict()
)
self.debug(f"added {portfolio_name}={portfolio_id} to id_cache")
product = launch_details.get("product")
target = (
r.get(launch_details.get("portfolio")).get("products").get(product)
)
all_details = json.loads(target.open("r").read())
all_products_and_their_versions = all_details
for p in all_products_and_their_versions:
product_name = p.get("Name")
self.debug(f"processing product_name={product_name}")
if (
regional_id_cache[portfolio_name]["products"].get(product_name)
is None
):
regional_id_cache[portfolio_name]["products"][
product_name
] = dict(id=p.get("ProductId"), versions=dict())
self.debug(f"added {product_name} to id_cache")
for a in p.get("provisioning_artifact_details"):
version_id = a.get("Id")
version_name = a.get("Name")
self.debug(
f"added version {version_name}={version_id} to id_cache"
)
regional_id_cache[portfolio_name]["products"][product_name][
"versions"
][version_name] = version_id
global_id_cache[region] = regional_id_cache
bucket = f"sc-puppet-spoke-deploy-{self.puppet_account_id}"
cached_output_signed_url = None
if self.input().get("parameters") or self.input().get("parameter_by_paths"):
with zipfile.ZipFile(
"output/GetSSMParamTask.zip", "w", zipfile.ZIP_DEFLATED
) as zip:
files = glob.glob("output/GetSSMParam*/**", recursive=True)
for filename in files:
zip.write(filename, filename)
with self.hub_client("s3") as s3:
key = f"{os.getenv('CODEBUILD_BUILD_NUMBER', '0')}-cached-output.zip"
s3.upload_file(
Filename="output/GetSSMParamTask.zip", Bucket=bucket, Key=key,
)
cached_output_signed_url = s3.generate_presigned_url(
"get_object",
Params={"Bucket": bucket, "Key": key},
ExpiresIn=60 * 60 * 24,
)
with self.hub_client("s3") as s3:
manifest_content = yaml.safe_dump(json.loads(json.dumps(new_manifest)))
key = f"{os.getenv('CODEBUILD_BUILD_NUMBER', '0')}.yaml"
self.debug(f"Uploading generated manifest {key} to {bucket}")
s3.put_object(
Body=manifest_content, Bucket=bucket, Key=key,
)
self.debug(f"Generating presigned URL for {key}")
signed_url = s3.generate_presigned_url(
"get_object",
Params={"Bucket": bucket, "Key": key},
ExpiresIn=60 * 60 * 24,
)
self.write_output(
dict(
manifest_content=manifest_content,
signed_url=signed_url,
cached_output_signed_url=cached_output_signed_url,
)
)
```
{
"source": "jordaneyres/canarytools-python",
"score": 2
} |
#### File: canarytools-python/canarytools/console.py
```python
import requests
import pytz
import os
import logging
import sys
import time
from datetime import datetime
try:
# python 2
import ConfigParser as configparser
except ImportError:
# python 3
import configparser
from .models.devices import Devices
from .models.incidents import Incidents
from .models.settings import Settings
from .models.canarytokens import CanaryTokens
from .models.result import Result
from .models.update import Updates
from .exceptions import ConfigurationError, ConsoleError, InvalidAuthTokenError, \
ConnectionError, DeviceNotFoundError, IncidentNotFoundError, InvalidParameterError, \
UpdateError, CanaryTokenError
ROOT = 'https://{0}.canary.tools/api/v1/'
RESULT_SUCCESS = 'success'
RESULT_ERROR = 'error'
logger = logging.getLogger('canarytools')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stderr))
class Console(object):
def __init__(self, domain=None, api_key=None, timezone=pytz.utc, debug=False, debug_level=logging.DEBUG):
"""Initialize Console object. All API calls are made with this object
:param domain: The domain of the Canary console
:param api_key: The API key received on your Canary console
:param timezone: The timezone to be used when displaying objects with datetime information. ``pytz``
timezones to be used
:param debug: Debug flag for debugging requests/responses
:param debug_level: Debug level. ``logging`` debug level used. ``logging.DEBUG`` will display all
requests and responses as well as response data. ``logging.INFO`` will only log the requests and responses.
The default is ``logging.DEBUG``
:except ConfigurationError: Domain and/or API auth token not set
Usage::
>>> import canarytools
>>> console = canarytools.Console(domain='console_domain', api_key='test_key')
>>> import canarytools
>>> import logging
>>> console = canarytools.Console(debug=True)
"""
if domain is None and api_key is None:
if 'CANARY_API_DOMAIN' in os.environ and 'CANARY_API_TOKEN' in os.environ:
domain = os.getenv('CANARY_API_DOMAIN')
api_key = os.getenv('CANARY_API_TOKEN')
else:
# try load from disk
domain, api_key = self.read_config()
if domain is None or api_key is None:
raise ConfigurationError("Domain and/or API auth token not set.")
if debug:
self.level = debug_level
else:
self.level = logging.NOTSET
self.domain = domain
self.api_key = api_key
global ROOT
ROOT = ROOT.format(self.domain)
self.tz = timezone
self.session = requests.session()
self.session.params = {'auth_token': api_key}
self.devices = Devices(self)
self.incidents = Incidents(self)
self.settings = Settings(self)
self.tokens = CanaryTokens(self)
self.updates = Updates(self)
def ping(self):
"""Tests the connection to the Canary Console
:return: Returns ``True`` if a connection could be established
and ``False`` otherwise
:rtype: bool
Usage::
>>> import canarytools
>>> console = canarytools.Console()
>>> console.ping()
True
"""
params = {}
result = self.get('ping', params)
if result.result == RESULT_SUCCESS:
return True
else:
return False
def post(self, url, params, parser=None, files={}):
"""Post request
:param url: Url of the API endpoint
:param params: List of parameters to be sent
        :param parser: The function used to parse JSON data into a specific object
:param files: Files to be uploaded
:return: Object(s) or a Result Indicator Object
"""
try:
self.log('[{datetime}] POST to {ROOT}{url}.json: {params}'.format(
datetime=datetime.now(self.tz), ROOT=ROOT, url=url, params=params))
start = time.time()
r = self.session.post(url="{0}{1}".format(ROOT, url), data=params, files=files)
complete = time.time() - start
self.log(
'[{datetime}] Received {response_code} in {:.2f}ms: '.format(
complete * 1000, datetime=datetime.now(self.tz), response_code=r.status_code), data=r.text)
except requests.exceptions.ConnectionError:
self.throw_connection_error()
return self.handle_response(r.json(), parser)
def get(self, url, params, parser=None):
"""Get request
:param url: Url of the API endpoint
:param params: List of parameters to be sent
        :param parser: The function used to parse JSON data into a specific object
:return: Object(s) or a Result Indicator Object
"""
try:
self.log('[{datetime}] GET to {ROOT}{url}.json: {params}'.format(
datetime=datetime.now(self.tz), ROOT=ROOT, url=url, params=params))
start = time.time()
r = self.session.get(url="{0}{1}".format(ROOT, url), params=params)
complete = time.time() - start
self.log(
'[{datetime}] Received {response_code} in {:.2f}ms: '.format(
complete * 1000, datetime=datetime.now(self.tz), response_code=r.status_code), data=r.text)
except requests.exceptions.ConnectionError:
self.throw_connection_error()
return self.handle_response(r.json(), parser)
def delete(self, url, params, parser=None):
"""Delete request
:param url: Url of the API endpoint
:param params: List of parameters to be sent
:param parser: The function used to parse JSON data into an specific object
:return: Object(s) or a Result Indicator Object
"""
try:
            self.log('[{datetime}] DELETE to {ROOT}{url}.json: {params}'.format(
                datetime=datetime.now(self.tz), ROOT=ROOT, url=url, params=params))
start = time.time()
r = self.session.delete(url="{0}{1}".format(ROOT, url), params=params)
complete = time.time() - start
self.log(
                '[{datetime}] Received {response_code} in {:.2f}ms: '.format(
complete * 1000, datetime=datetime.now(self.tz), response_code=r.status_code), data=r.text)
        except requests.exceptions.ConnectionError:
self.throw_connection_error()
return self.handle_response(r.json(), parser)
def throw_connection_error(self):
raise ConnectionError(
"Failed to establish a new connection with console at domain: '{domain}'".format(
domain=self.domain))
def read_config(self):
"""Read config from disk
:return: The api_key and the domain
"""
paths = [os.path.expanduser('~/.canarytools.config'), os.path.expanduser('~/canarytools.config'),
'/etc/canarytools.config', 'canarytools.config', 'config/canarytools.config']
config_parser = configparser.RawConfigParser()
for path in paths:
try:
config_parser.read(path)
api_key = config_parser.get('CanaryTools', 'api_key')
domain = config_parser.get('CanaryTools', 'domain')
if api_key and domain:
return domain, api_key
except configparser.NoSectionError:
pass
except configparser.NoOptionError:
pass
return None, None
def handle_response(self, response, parser):
"""Handle JSON response. Check for exceptions and objectify
:param response: The response from the request made to the web API
        :param parser: The function used to parse JSON data into a specific object
:return: Object(s) or a Result Indicator Object
"""
if 'result' in response and response['result'] == RESULT_ERROR:
self.handle_exception(response)
else:
if parser:
return parser(response)
else:
return Result.parse(self, response)
def handle_exception(self, response):
"""Handle unsuccessful results returned from the web API
:param response: The response from the request made to the web API
"""
if 'message' in response:
message = response['message']
if message in ERROR_MAP:
raise ERROR_MAP[message]
elif 'Update with tag ' in message:
raise ERROR_MAP['Update with tag %s does not exist.'](message)
raise ConsoleError(message)
raise ConsoleError()
def log(self, msg, data=None):
"""Log debug information based on level
"""
if self.level == logging.INFO:
log_msg = '{log_msg} Please set logging level to INFO, or greater, to see response data payload.'.format(
log_msg=msg)
logger.info(log_msg)
elif self.level == logging.DEBUG:
log_msg = '{log_msg} {data}'.format(log_msg=msg, data=data)
logger.debug(log_msg)
def __repr__(self):
return '<Console %s>' % self.api_key
ERROR_MAP = {
'Invalid auth_token': InvalidAuthTokenError,
'Device not found': DeviceNotFoundError,
'Incident not found': IncidentNotFoundError,
'Settings does not permit updating this canary.':
UpdateError("Settings does not permit updating this canary. "
"Check that automatic updates are not configured in the console."),
'Update with tag %s does not exist.': UpdateError,
'Parameter older_than was invalid.':
InvalidParameterError("Parameter older_than was invalid"),
'Cannot use src_host and node_id together':
InvalidParameterError("Cannot use src_host and node_id together"),
'Empty memo':
InvalidParameterError("Please specify a memo when creating a Canarytoken "
"to remind yourself where you intend to use it :)"),
'Supplied kind is not valid.':
InvalidParameterError("Supplied kind is not valid when creating a Canarytoken"),
'Could not process the parameters':
InvalidParameterError("Error occurred while creating a Canarytoken. "
"Please ensure all required parameters are present and in the correct format."),
'Could not process the parameters. cloned_web is invalid, not enough domain labels':
InvalidParameterError("The parameter cloned_web is invalid, not enough domain labels"),
'Could not save Canarydrop': CanaryTokenError('Could not save Canarydrop'),
'Could not process the parameters': CanaryTokenError('Could not process the parameters'),
'Could not find the Canarytoken': CanaryTokenError('Could not find the Canarytoken'),
'Could not decode the memo': CanaryTokenError('Could not decode the memo'),
'Could not delete Canarydrop': CanaryTokenError('Could not delete Canarydrop'),
'File generation not supported.': CanaryTokenError('File generation not supported.'),
}
```
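A minimal usage sketch of the `Console` class above, using only the constructor and `ping()` shown in its docstrings; the domain and API key are placeholders.

```python
import canarytools

# Placeholder credentials -- substitute your own console domain and API key
console = canarytools.Console(domain="example-console", api_key="0123456789abcdef")

if console.ping():
    print("Connected to the Canary console")
else:
    print("Console did not respond")
```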
#### File: canarytools/models/update.py
```python
from .base import CanaryToolsBase
class Updates(object):
def __init__(self, console):
"""Initialize Update object
:param console: The Console from which API calls are made
"""
self.console = console
def list_updates(self):
"""List of available updates
:return: List of Update objects
:rtype: List of :class:`Update <Update>` objects
Usage::
>>> import canarytools
            >>> console = canarytools.Console()
            >>> updates = console.updates.list_updates()
"""
params = {}
return self.console.get('updates/list', params, self.parse)
def update_device(self, node_id, update_tag):
"""Update the device
:param node_id: The node_id of the device to be updated
:param update_tag: The tag of the update to be updated to
:return: A Result object
:rtype: :class:`Result <Result>` object
:except UpdateError: Device update not permitted. Automatic updates are not configured. Or the update tag does
not exist.
Usage::
>>> import canarytools
            >>> console = canarytools.Console()
            >>> result = console.updates.update_device(node_id='00000000ff798b93', update_tag='4ae023bdf75f14c8f08548bf5130e861')
"""
params = {'node_id': node_id, 'update_tag': update_tag}
return self.console.post('device/update', params)
def parse(self, data):
"""Parse JSON data
:param data: JSON data
:return: A list of Update objects
"""
updates = list()
if 'updates' in data:
for update in data['updates']:
updates.append(Update.parse(self.console, update))
return updates
class Update(CanaryToolsBase):
def __init__(self, console, data):
"""Initialize an Update object
:param console: The Console from which API calls are made
:param data: JSON data
**Attributes:**
        - **supported_versions (list)** -- List of Canary versions that support this update
- **description (str)** -- Description of the update
- **filename (str)** -- Name of update file
- **ignore (bool)** -- Should this update be ignored?
- **tag (str)** -- Update tag. Used to uniquely identify an update.
- **version (str)** -- Version to which the Canary is updated.
"""
super(Update, self).__init__(console, data)
def __setattr__(self, key, value):
"""Helper method
"""
if 'ignore' == key:
value = value == 'True'
super(Update, self).__setattr__(key, value)
def __str__(self):
"""Helper method"""
return "[Update] description: {description} version: {version}".format(
description=self.description, version=self.version)
``` |
{
"source": "jordanfeldman/s3pypi",
"score": 2
} |
#### File: s3pypi/s3pypi/package.py
```python
import logging
import os
import re
import sys
from collections import defaultdict
from subprocess import check_output, CalledProcessError
from jinja2 import Environment, PackageLoader
from s3pypi import __prog__
from s3pypi.exceptions import S3PyPiError
__author__ = '<NAME>'
__copyright__ = 'Copyright 2016, November Five'
__license__ = 'MIT'
log = logging.getLogger()
class Package(object):
"""Python package."""
def __init__(self, name, files):
self.name, self.version = name.rsplit('-', 1)
self.files = set(files)
def __str__(self):
return '%s-%s' % (self.name, self.version)
def _attrs(self):
return self.name, self.version
def __lt__(self, other):
return self.version < other.version
def __eq__(self, other):
return isinstance(other, Package) and self._attrs() == other._attrs()
def __hash__(self):
return hash(self._attrs())
@property
def directory(self):
return re.sub(r'[-_.]+', '-', self.name.lower())
@staticmethod
def _find_package_name(text):
match = re.search(r'^(copying files to|making hard links in) (.+)\.\.\.', text, flags=re.MULTILINE)
if not match:
raise RuntimeError('Package name not found! (use --verbose to view output)')
return match.group(2)
@staticmethod
def _find_wheel_name(text):
match = re.search(r"creating '.*?(dist.*\.whl)' and adding", text, flags=re.MULTILINE)
if not match:
raise RuntimeError('Wheel name not found! (use --verbose to view output)')
return match.group(1)
@staticmethod
def create(wheel=True, sdist=True):
cmd = [sys.executable, 'setup.py', 'sdist', '--formats', 'gztar']
if wheel:
cmd.append('bdist_wheel')
log.debug("Package create command line: {}".format(' '.join(cmd)))
try:
stdout = check_output(cmd).decode().strip()
except CalledProcessError as e:
raise RuntimeError(e.output.rstrip())
log.debug(stdout)
name = Package._find_package_name(stdout)
files = []
if sdist:
files.append(name + '.tar.gz')
if wheel:
files.append(os.path.basename(Package._find_wheel_name(stdout)))
log.debug("Package name: {}".format(name))
log.debug("Files to upload: {}".format(files))
return Package(name, files)
class Index(object):
"""Index of package versions, to be rendered to HTML."""
template = Environment(loader=PackageLoader(__prog__, 'templates')).get_template('index.html.j2')
def __init__(self, packages):
self.packages = set(packages)
@staticmethod
def parse(html):
filenames = defaultdict(set)
for match in re.findall(r'<a href=".+">((.+?-\d+\.\d+\.\d+).+)</a>', html):
filenames[match[1]].add(match[0])
return Index(Package(name, files) for name, files in filenames.items())
def to_html(self):
return self.template.render({'packages': self.packages})
def add_package(self, package, force=False):
if force:
self.packages.discard(package)
elif any(p.version == package.version for p in self.packages):
raise S3PyPiError(
'%s already exists! You should use a different version (use --force to override).' % package)
self.packages.add(package)
``` |
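A short sketch of how `Package` and `Index` fit together, run from a project root containing a `setup.py`; in normal use the index would first be parsed from the existing `index.html` in the S3 bucket via `Index.parse(html)` rather than starting empty.

```python
from s3pypi.package import Package, Index

# Build sdist + wheel via setup.py and collect the resulting artefact names
package = Package.create(wheel=True, sdist=True)

# Starting from an empty index purely for illustration
index = Index([])
index.add_package(package)
print(index.to_html())
```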
{
"source": "jordanfelle/SplunkAdmins",
"score": 2
} |
#### File: SplunkAdmins/bin/lookup_watcher.py
```python
from __future__ import print_function
import requests
import logging
from logging.config import dictConfig
import os
import sys
import xml.dom.minidom, xml.sax.saxutils
from subprocess import Popen, PIPE
from time import sleep
from lookup_watcher_class import LookupWatcher
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "lib"))
from splunklib.six.moves import range
"""
Lookup Watcher
Check the filesystem for lookup files and check the current size,
record stats around the last update time, the previous update time
and how regularly updates occur,
then store the size & stats into a kvstore for use by admins
"""
#Define the XML scheme for the inputs page
SCHEME = """<scheme>
<title>Lookup Watcher</title>
<description>Watch lookup files on the Splunk filesystem and record the size and most recent update stats in a kvstore file</description>
<use_external_validation>false</use_external_validation>
<streaming_mode>simple</streaming_mode>
<endpoint>
<args>
<arg name="debugMode">
<title>debugMode</title>
<description>turn on DEBUG level logging (defaults to INFO) (true/false)</description>
<validation>is_bool('debugMode')</validation>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
#Get the XML for validation
def get_validation_data():
val_data = {}
# read everything from stdin
val_str = sys.stdin.read()
# parse the validation XML
doc = xml.dom.minidom.parseString(val_str)
root = doc.documentElement
logger.debug("XML: found items")
item_node = root.getElementsByTagName("item")[0]
if item_node:
logger.debug("XML: found item")
name = item_node.getAttribute("name")
val_data["stanza"] = name
params_node = item_node.getElementsByTagName("param")
for param in params_node:
name = param.getAttribute("name")
logger.debug("Found param %s" % name)
if name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
val_data[name] = param.firstChild.data
return val_data
# prints XML error data to be consumed by Splunk
def print_error(s):
print("<error><message>%s</message></error>" % xml.sax.saxutils.escape(s))
#Run an OS process with a timeout, this way if a command gets "stuck" waiting for input it is killed
def runOSProcess(command, timeout=10):
p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
for t in range(timeout):
sleep(1)
if p.poll() is not None:
#return p.communicate()
(stdoutdata, stderrdata) = p.communicate()
if p.returncode != 0:
return stdoutdata, stderrdata, False
else:
return stdoutdata, stderrdata, True
p.kill()
return "", "timeout after %s seconds" % (timeout), False
#Validate the arguments to the app to ensure this will work...
def validate_arguments():
#val_data = get_validation_data()
return
#Print the scheme
def do_scheme():
print(SCHEME)
splunkLogsDir = os.environ['SPLUNK_HOME'] + "/var/log/splunk"
#Setup the logging
logging_config = dict(
version = 1,
formatters = {
'f': {'format':
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
},
handlers = {
'h': {'class': 'logging.StreamHandler',
'formatter': 'f',
'level': logging.WARN},
'file': {'class' : 'logging.handlers.RotatingFileHandler',
'filename' : splunkLogsDir + '/lookup_watcher.log',
'formatter': 'f',
'maxBytes' : 2097152,
'level': logging.DEBUG,
'backupCount' : 5 }
},
root = {
'handlers': ['h','file'],
'level': logging.DEBUG,
},
)
dictConfig(logging_config)
logger = logging.getLogger()
logging.getLogger().setLevel(logging.INFO)
# Script must implement these args: scheme, validate-arguments
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "--scheme":
do_scheme()
elif sys.argv[1] == "--validate-arguments":
validate_arguments()
else:
pass
else:
vc = LookupWatcher()
vc.run_script()
sys.exit(0)
``` |
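Splunk drives this script as a modular input; the `--scheme` invocation below is how the scheme defined at the top of the script gets discovered. This is only a sketch: it assumes `SPLUNK_HOME` points at a real Splunk install (the module-level log handler needs `$SPLUNK_HOME/var/log/splunk` to exist) and that the script is run from its own directory.

```python
import os
import subprocess

env = dict(os.environ, SPLUNK_HOME="/opt/splunk")  # placeholder install path
result = subprocess.run(
    ["python", "lookup_watcher.py", "--scheme"],
    capture_output=True, text=True, env=env,
)
print(result.stdout)  # the <scheme> XML defined at the top of the script
```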
{
"source": "JordanFitz/ultimate-cli",
"score": 3
} |
#### File: JordanFitz/ultimate-cli/result.py
```python
import json
class Result:
def __str__(self):
return json.dumps(self.__dict__)
def __init__(self, artist, song, rating, kind, url, rating_id):
self.artist = artist
self.song = song
self.rating = rating
self.kind = kind
self.url = url
self.rating_id = rating_id
def get_tabular(self):
return [
self.artist,
self.song,
self.rating,
self.kind
]
```
#### File: JordanFitz/ultimate-cli/scraper.py
```python
import requests
import json
from bs4 import BeautifulSoup
from result import Result
BASE = "https://www.ultimate-guitar.com/"
URLS = {
"search": BASE + "search.php"
}
def build_url(name, **kwargs):
url = URLS[name] + "?"
for key, value in kwargs.items():
url += key + "=" + value + "&"
return url[:-1]
class Scraper:
def __init__(self, url):
self.url = url
self.__load_data()
def __load_data(self):
request = requests.get(self.url, allow_redirects=True)
self.soup = BeautifulSoup(request.content, features="html.parser")
store = self.soup.select_one(".js-store")["data-content"]
store = json.loads(store)
self.data = store["store"]["page"]["data"]
class TabScraper(Scraper):
def __init__(self, url):
super().__init__(url)
def get_tab(self):
return self.data["tab_view"]["wiki_tab"]["content"]
class SearchScraper(Scraper):
def __init__(self, query):
super().__init__(build_url(
"search", search_type="title", value=query
))
def get_results(self):
results = []
for raw_result in self.data["results"]:
rating_number = round(raw_result.get("rating", 0))
votes = raw_result.get("votes", 0)
rating = "{}{} ({})".format("★" * rating_number, "☆" * (5 - rating_number), votes)
results.append(Result(
artist=raw_result.get("artist_name", "Unknown Artist"),
song=raw_result.get("song_name", "Unknown Song"),
rating=rating,
kind=raw_result.get("type", "Unknown"),
url=raw_result.get("tab_url", None),
rating_id="{}{}".format(rating_number, votes)
))
return results
``` |
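A usage sketch of the two scrapers; results are scraped live from ultimate-guitar.com, so this depends on the site being reachable and its markup staying compatible with the `.js-store` parsing above.

```python
from scraper import SearchScraper, TabScraper

results = SearchScraper("wonderwall").get_results()
for result in results:
    print(result.get_tabular())

# Fetch the tab body of the first result that carries a URL
first = next(r for r in results if r.url)
print(TabScraper(first.url).get_tab())
```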
{
"source": "jordanflowers/24alarmclock",
"score": 3
} |
#### File: jordanflowers/24alarmclock/init.py
```python
import sys
import PyQt5
import datetime
from audioDict import audioFiles
from PyQt5.QtWidgets import QWidget
from PyQt5 import QtMultimedia
class Clock(QWidget):
def __init__(self):
super(Clock, self).__init__()
# Initialize Clock
self.alarm = None
self.alarmtime = datetime.time()
#self.alarmtime = self.alarmtime.replace(hour=7, minute=0)
self.getAlarmTime()
self.initGui()
self.showFullScreen()
self.initClock()
def getAlarmTime(self):
f = open ("alarmtime.txt", "r+")
for line in f:
line = line.split(':')
h = line[0]
m = line[1]
self.alarmtime = self.alarmtime.replace(hour=int(h), minute=int(m))
f.close()
# Function that iniitializes the GUI
def initGui(self):
# Set Background Color
colors = self.palette()
colors.setColor(self.backgroundRole(), PyQt5.QtGui.QColor('black'))
self.setPalette(colors)
# Set custom Fonts
font_db = PyQt5.QtGui.QFontDatabase()
font_id = font_db.addApplicationFont("Digital Dismay.otf")
fontChoice = PyQt5.QtGui.QFont("Digital Dismay", 150)
# Initialize the time
self.clockText = PyQt5.QtWidgets.QLabel()
self.updateTimer()
self.clockText.setAlignment(PyQt5.QtCore.Qt.AlignCenter)
self.clockText.setFont(fontChoice)
self.clockText.setStyleSheet("QLabel { color : #BEAB40; }")
# Initialize the Vbox
self.vboxClock = PyQt5.QtWidgets.QVBoxLayout()
self.vboxClock.addWidget(self.clockText)
self.setLayout(self.vboxClock)
# Function that Initializes the timer for the clock
def initClock(self):
self.timer = PyQt5.QtCore.QTimer()
self.timer.timeout.connect(self.updateTimer)
self.timer.start(1000)
# Update the clock to current time
def updateTimer(self):
now = datetime.datetime.now()
#now = now.replace(hour=8, minute=0, second=0)
militaryh = now.strftime("%H")
h = now.strftime("%I")
m = now.strftime("%M")
s = now.strftime("%S")
self.clockText.setText(h+":"+m+":"+s)
alarmh = self.alarmtime.strftime("%I")
alarmm = self.alarmtime.strftime("%M")
if h == alarmh and int(m) == int(alarmm)-1 and s == "53":
self.setOffTheAlarm()
elif militaryh in audioFiles:
if m == "00" and s == "00":
self.alarm = QtMultimedia.QSound(audioFiles[militaryh])
self.alarm.play()
elif m == "29" and s == "57":
self.alarm = QtMultimedia.QSound("audio/beeping.wav")
self.alarm.play()
def setOffTheAlarm(self):
self.alarm = QtMultimedia.QSound("audio/mainAlarm.wav")
self.alarm.play()
def mousePressEvent(self, event):
self.press = datetime.datetime.now()
def mouseReleaseEvent(self, event):
self.release = datetime.datetime.now()
if (self.release-self.press).total_seconds() < 1:
            if self.alarm is not None:
                if not self.alarm.isFinished():
self.alarm.stop()
return
self.initAlarmSetterGUI()
def alarmClicked(self, event):
time = self.alarmSetter.clockText.text().split(':')
h = int(time[0])
m = int(time[1])
self.alarmtime = self.alarmtime.replace(hour=h, minute=m)
f = open("alarmtime.txt", 'w')
f.write(time[0] + ":" + time[1])
f.close()
self.showFullScreen()
self.alarmSetter.hide()
def initAlarmSetterGUI(self):
self.alarmSetter = QWidget()
# Set Background Color
colors = self.palette()
colors.setColor(self.backgroundRole(), PyQt5.QtGui.QColor('black'))
self.alarmSetter.setPalette(colors)
# Set custom Fonts
font_db = PyQt5.QtGui.QFontDatabase()
font_id = font_db.addApplicationFont("Digital Dismay.otf")
fontChoice = PyQt5.QtGui.QFont("Digital Dismay", 150)
self.alarmSetter.clockText = PyQt5.QtWidgets.QLabel()
self.alarmSetter.clockText.mousePressEvent = self.alarmClicked
h = self.alarmtime.strftime("%H")
m = self.alarmtime.strftime("%M")
self.alarmSetter.clockText.setText(h+":"+m)
self.alarmSetter.clockText.setAlignment(PyQt5.QtCore.Qt.AlignCenter)
self.alarmSetter.clockText.setFont(fontChoice)
self.alarmSetter.clockText.setStyleSheet("QLabel { color : #BEAB40; }")
self.alarmSetter.vboxClock = PyQt5.QtWidgets.QVBoxLayout()
self.alarmSetter.hboxClockUpButtons = PyQt5.QtWidgets.QHBoxLayout()
self.alarmSetter.hboxClockDownButtons = PyQt5.QtWidgets.QHBoxLayout()
self.alarmSetter.hourButtonUp = PyQt5.QtWidgets.QPushButton()
self.alarmSetter.hourButtonUp.clicked.connect(self.hourUp)
self.alarmSetter.hourButtonUp.setIcon(PyQt5.QtGui.QIcon(PyQt5.QtGui.QPixmap("buttons/buttonUp.png")))
self.alarmSetter.hourButtonUp.setStyleSheet("QPushButton { height: 120px; width: 120px; background-color: black;}")
self.alarmSetter.minuteButtonUp = PyQt5.QtWidgets.QPushButton()
self.alarmSetter.minuteButtonUp.clicked.connect(self.minuteUp)
self.alarmSetter.minuteButtonUp.setIcon(PyQt5.QtGui.QIcon(PyQt5.QtGui.QPixmap("buttons/buttonUp.png")))
self.alarmSetter.minuteButtonUp.setStyleSheet("QPushButton { height: 120px; width: 120px; background-color: black;}")
self.alarmSetter.hourButtonDown = PyQt5.QtWidgets.QPushButton()
self.alarmSetter.hourButtonDown.clicked.connect(self.hourDown)
self.alarmSetter.hourButtonDown.setIcon(PyQt5.QtGui.QIcon(PyQt5.QtGui.QPixmap("buttons/buttonDown.png")))
self.alarmSetter.hourButtonDown.setStyleSheet("QPushButton { height: 120px; width: 120px; background-color: black;}")
self.alarmSetter.minuteButtonDown = PyQt5.QtWidgets.QPushButton()
self.alarmSetter.minuteButtonDown.clicked.connect(self.minuteDown)
self.alarmSetter.minuteButtonDown.setIcon(PyQt5.QtGui.QIcon(PyQt5.QtGui.QPixmap("buttons/buttonDown.png")))
self.alarmSetter.minuteButtonDown.setStyleSheet("QPushButton { height: 120px; width: 120px; background-color: black;}")
self.alarmSetter.hboxClockUpButtons.addWidget(self.alarmSetter.hourButtonUp)
self.alarmSetter.hboxClockUpButtons.addWidget(self.alarmSetter.minuteButtonUp)
self.alarmSetter.vboxClock.addLayout(self.alarmSetter.hboxClockUpButtons)
self.alarmSetter.vboxClock.addWidget(self.alarmSetter.clockText)
self.alarmSetter.hboxClockDownButtons.addWidget(self.alarmSetter.hourButtonDown)
self.alarmSetter.hboxClockDownButtons.addWidget(self.alarmSetter.minuteButtonDown)
self.alarmSetter.vboxClock.addLayout(self.alarmSetter.hboxClockDownButtons)
self.alarmSetter.setLayout(self.alarmSetter.vboxClock)
self.alarmSetter.showFullScreen()
self.hide()
def minuteUp(self):
time = self.alarmSetter.clockText.text().split(':')
minute = int(time[1])
minute += 1
minute = minute % 60
self.alarmSetter.clockText.setText(time[0]+":" + '%02d' % minute)
def minuteDown(self):
time = self.alarmSetter.clockText.text().split(':')
minute = int(time[1])
minute -= 1
if minute < 0:
minute = 59
self.alarmSetter.clockText.setText(time[0]+":" + '%02d' % minute)
def hourUp(self):
time = self.alarmSetter.clockText.text().split(':')
hour = int(time[0])
hour += 1
hour = hour % 24
self.alarmSetter.clockText.setText('%02d' % hour +":" +time[1])
def hourDown(self):
time = self.alarmSetter.clockText.text().split(':')
hour = int(time[0])
hour -= 1
if hour < 0:
hour = 23
self.alarmSetter.clockText.setText('%02d' % hour +":" +time[1])
def main():
app = PyQt5.QtWidgets.QApplication(sys.argv)
myClock = Clock()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
``` |
{
"source": "jordang512/GAN_inpainting_project",
"score": 3
} |
#### File: GAN_inpainting_project/GAN_inpainting_model/mask_rcnn_testing.py
```python
from matplotlib import pyplot as plt
from gluoncv import model_zoo, data, utils
from PIL import Image
import sys
import numpy as np
def get_masks(x, orig_img):
net = model_zoo.get_model('mask_rcnn_resnet50_v1b_coco', pretrained=True)
threshold = 0.5
#x, orig_img = data.transforms.presets.rcnn.transform_test(image)
ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in net(x)]
# x is index, int(y[0]) is category id
filtered_ids = np.array([(x,int(y[0])) for x,y in enumerate(ids) if scores[x] > threshold])
class_names = net.classes
# Prompt user to select a category
print("I found these categories: ")
unique_classes = list(set([class_names[item[1]] for item in filtered_ids]))
for idx,item in enumerate(unique_classes):
print("{}: {}".format(idx,item))
print("Please select one category by entering the number next to it")
# To get the category id, convert input->class->index of class
selection = net.classes.index(unique_classes[int(input("My choice is: "))])
# Prune scores, masks, boxes, and ids by selection
# It's important to define these as np.array's
scores = np.array([scores[item[0]] for item in filtered_ids if item[1]==selection])
masks = np.array([masks[item[0]] for item in filtered_ids if item[1]==selection])
bboxes = np.array([bboxes[item[0]] for item in filtered_ids if item[1]==selection])
ids = np.array([item[1] for item in filtered_ids if item[1]==selection])
# paint segmentation mask on images directly
width, height = orig_img.shape[1], orig_img.shape[0]
masks = utils.viz.expand_mask(masks, bboxes, (width, height), scores)
orig_img = utils.viz.plot_mask(orig_img, masks)
# identical to Faster RCNN object detection
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax = utils.viz.plot_bbox(orig_img, bboxes, scores, ids,
class_names=net.classes, ax=ax)
plt.show()
return masks
```
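A sketch of feeding an image through `get_masks`; it assumes gluoncv's `load_test` helper from the same rcnn preset family as the commented-out `transform_test` call, a placeholder image path, and that the script is run from the `GAN_inpainting_model` directory. Note the function prompts interactively for a category.

```python
from gluoncv import data

from mask_rcnn_testing import get_masks

# 'photo.jpg' is a placeholder; load_test resizes/normalises and also returns the original image
x, orig_img = data.transforms.presets.rcnn.load_test("photo.jpg")
masks = get_masks(x, orig_img)  # prompts for a category, then plots and returns its masks
print("selected", len(masks), "mask(s)")
```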
#### File: GAN_inpainting_project/GAN_inpainting_model/video_process.py
```python
import imageio
import mxnet
from gluoncv import model_zoo, data, utils
from matplotlib import pyplot as plt
from PIL import Image
import sys
import numpy as np
from scipy.signal import convolve2d
# from inpainting_pipeline import expand_masks, erase_masks
import argparse
import cv2
import tensorflow as tf
import neuralgym as ng
from inpaint_model import InpaintCAModel
fname = 'baseballshort.gif'
parser = argparse.ArgumentParser()
parser.add_argument('--image', default='', type=str,
help='The filename of image to be completed.')
parser.add_argument('--mask', default='', type=str,
help='The filename of mask, value 255 indicates mask.')
parser.add_argument('--output', default='output.png', type=str,
help='Where to write output.')
parser.add_argument('--checkpoint_dir', default='', type=str,
help='The directory of tensorflow checkpoint.')
def expand_masks(masks, ksize):
kernel = np.ones((ksize, ksize))
expanded = convolve2d(masks, kernel, mode='same')
return (expanded > 0) * 255
def erase_masks(fpath):
x, im = downsize_file(fpath)
masks = get_masks(x, im)
if masks.ndim == 3:
compiled_mask = np.amax(masks, axis=0)
else:
compiled_mask = masks
    compiled_mask = expand_masks(compiled_mask, 21) #convolve with a 21 x 21 kernel to expand masks for inpainting
compiled_mask = np.array([compiled_mask for _ in range(3)])
compiled_mask = np.moveaxis(compiled_mask, 0, -1)
compiled_mask = compiled_mask * 255. / np.amax(compiled_mask)
compiled_mask = compiled_mask.astype(int)
print(compiled_mask.shape)
print(im.shape)
# cv2.imwrite("mask.png", compiled_mask)
test_model(im, compiled_mask)
def test_model(image, mask, output_dir='output_images/output.png', checkpoint_dir='model_logs/release_places2_256'):
ng.get_gpus(1)
model = InpaintCAModel()
assert image.shape == mask.shape
h, w, _ = image.shape
grid = 8
image = image[:h//grid*grid, :w//grid*grid, :]
mask = mask[:h//grid*grid, :w//grid*grid, :]
print('Shape of image: {}'.format(image.shape))
image = np.expand_dims(image, 0)
mask = np.expand_dims(mask, 0)
input_image = np.concatenate([image, mask], axis=2)
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
input_image = tf.constant(input_image, dtype=tf.float32)
output = model.build_server_graph(input_image)
output = (output + 1.) * 127.5
output = tf.reverse(output, [-1])
output = tf.saturate_cast(output, tf.uint8)
# load pretrained model
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assign_ops = []
for var in vars_list:
vname = var.name
from_name = vname
var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name)
assign_ops.append(tf.assign(var, var_value))
sess.run(assign_ops)
print('Model loaded.')
result = sess.run(output)
# cv2.imwrite(output_dir, result[0][:, :, ::-1])
# plt.imsave('out.jpg', result[0][:, :, ::-1])
return result[0]
def get_masks(x, orig_img, net, class_to_remove):
threshold = 0.5
#x, orig_img = data.transforms.presets.rcnn.transform_test(image)
ids, scores, bboxes, masks = [xx[0].asnumpy() for xx in net(x)]
# x is index, int(y[0]) is category id
filtered_ids = np.array([(x,int(y[0])) for x,y in enumerate(ids) if scores[x] > threshold])
# Prune scores, masks, boxes, and ids by selection
# It's important to define these as np.array's
scores = np.array([scores[item[0]] for item in filtered_ids if item[1]==class_to_remove])
masks = np.array([masks[item[0]] for item in filtered_ids if item[1]==class_to_remove])
bboxes = np.array([bboxes[item[0]] for item in filtered_ids if item[1]==class_to_remove])
ids = np.array([item[1] for item in filtered_ids if item[1]==class_to_remove])
    if masks.size == 0:
return []
width, height = orig_img.shape[1], orig_img.shape[0]
masks = utils.viz.expand_mask(masks, bboxes, (width, height), scores)
return masks
def process_video(fname):
vid = imageio.get_reader(fname, 'ffmpeg')
frames = []
for idx, f in enumerate(vid):
im = vid.get_data(idx)
frame, orig_im = data.transforms.presets.ssd.transform_test(mxnet.nd.array(im),600)
frames.append((frame, orig_im))
finished_frames = []
net = model_zoo.get_model('mask_rcnn_resnet50_v1b_coco', pretrained=True)
print([(x,y) for (x,y) in enumerate(net.classes)])
    class_to_remove = int(input("Please enter a class index: "))
print(len(frames))
for count, frame in enumerate(frames):
painted = process_frame(frame, net, class_to_remove)
finished_frames.append(painted)
print("Finished frame {}".format(count))
    imageio.mimsave('outgif.gif', finished_frames)
def process_frame(frame, net, class_to_remove):
masks = get_masks(frame[0], frame[1], net, class_to_remove)
    if len(masks) == 0:
return frame[1]
if masks.ndim == 3:
compiled_mask = np.amax(masks, axis=0)
else:
compiled_mask = masks
    compiled_mask = expand_masks(compiled_mask, 21) #convolve with a 21 x 21 kernel to expand masks for inpainting
compiled_mask = np.array([compiled_mask for _ in range(3)])
compiled_mask = np.moveaxis(compiled_mask, 0, -1)
compiled_mask = compiled_mask * 255. / np.amax(compiled_mask)
compiled_mask = compiled_mask.astype(int)
print(compiled_mask.shape)
print(frame[1].shape)
cv2.imwrite("mask.png", compiled_mask)
return test_model(frame[1], compiled_mask)
process_video(fname)
``` |
{
"source": "JordanG8/Bahr",
"score": 3
} |
#### File: Bahr/game1/DicesSet.py
```python
import random
d6Nums = range(1,7)
d6_1 = random.choice(d6Nums)
d6_2 = random.choice(d6Nums)
def d6_2Set():
d6_1 = random.choice(d6Nums)
d6_2 = random.choice(d6Nums)
return [d6_1, d6_2]
```
#### File: Bahr/game1/Welcome.py
```python
def welcome():
print("Hello players! Welcome to Bahr's Digital Dice Games!\nTo try out your digital dice enter 'roll'\nYou can also pass by entering 'pass'")
while True:
inp1 = input()
if inp1 == "roll":
from DicesSet import d6_2Set
print("Great!\nYour roll is:" + str(d6_2Set()))
break
elif inp1 == "pass":
print("Okay")
break
else:
print("Please enter either 'roll' or 'pass'.")
``` |
{
"source": "jordangeorge/pixiedust",
"score": 2
} |
#### File: pixiedust/display/__init__.py
```python
import warnings
from IPython.core.getipython import get_ipython
from pixiedust.utils.environment import Environment as PD_Environment
__all__ = ['addDisplayRunListener', 'display']
#Make sure that matplotlib is running inline
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
get_ipython().run_line_magic("matplotlib", "inline")
except NameError:
#IPython not available we must be in a spark executor
pass
displayRunListeners = []
def addDisplayRunListener(listener):
global displayRunListeners
displayRunListeners.append(listener)
from .display import *
from .chart import *
if PD_Environment.hasSpark:
from .graph import *
from .table import *
from .download import *
from .datahandler import getDataHandler
from pixiedust.utils.printEx import *
import traceback
import uuid
import pixiedust
from six import string_types
myLogger=pixiedust.getLogger(__name__ )
def display(entity, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
#todo: use ConverterRegistry
def toPython(entity):
from py4j.java_gateway import JavaObject
if entity is None or not isinstance(entity, JavaObject):
return entity
clazz = entity.getClass().getName()
if clazz == "org.apache.spark.sql.Dataset":
entity = entity.toDF()
clazz = "org.apache.spark.sql.DataFrame"
if clazz == "org.apache.spark.sql.DataFrame":
from pyspark.sql import DataFrame, SQLContext
from pyspark import SparkContext
entity = DataFrame(entity, SQLContext(SparkContext.getOrCreate(), entity.sqlContext()))
return entity
callerText = traceback.extract_stack(limit=2)[0][3]
if (callerText is None or callerText == "") and 'pixiedust_display_callerText' in globals():
callerText = globals()['pixiedust_display_callerText']
pr = None
try:
if "cell_id" in kwargs and "showchrome" not in kwargs and "handlerId" in kwargs:
if "gen_tests" in kwargs:
#remove gen_tests from command line
import re
m = re.search(",\\s*gen_tests\\s*=\\s*'((\\\\'|[^'])*)'", str(callerText), re.IGNORECASE)
if m is not None:
callerText = callerText.replace(m.group(0),"")
#generate new prefix
p = re.search(",\\s*prefix\\s*=\\s*'((\\\\'|[^'])*)'", str(callerText), re.IGNORECASE)
if p is not None:
prefix = ''.join([",prefix='", str(uuid.uuid4())[:8], "'"])
callerText = callerText.replace(p.group(0), prefix)
get_ipython().set_next_input(callerText)
if "profile" in kwargs:
import cProfile
pr = cProfile.Profile()
pr.enable()
scalaKernel = False
if callerText is None or callerText == "" and hasattr(display, "fetchEntity"):
callerText, entity = display.fetchEntity(entity)
entity = toPython(entity)
scalaKernel = True
#get a datahandler and displayhandler for this entity
dataHandler = getDataHandler(kwargs, entity)
selectedHandler = getSelectedHandler(kwargs, entity, dataHandler)
#notify listeners of a new display Run
for displayRunListener in displayRunListeners:
displayRunListener(entity, kwargs)
#check if we have a job monitor id
from pixiedust.utils.sparkJobProgressMonitor import progressMonitor
if progressMonitor:
progressMonitor.onDisplayRun(kwargs.get("cell_id"))
myLogger.debug("Creating a new display handler with options {0}: {1}".format(kwargs, selectedHandler))
displayHandler = selectedHandler.newDisplayHandler(kwargs,entity)
if displayHandler is None:
printEx("Unable to obtain handler")
return
displayHandler.handlerMetadata = selectedHandler
displayHandler.dataHandler = dataHandler
displayHandler.callerText = callerText
if scalaKernel:
displayHandler.scalaKernel = True
if displayHandler.callerText is None:
printEx("Unable to get entity information")
return
displayHandler.render()
finally:
if pr is not None:
                import pstats
                try:
                    from StringIO import StringIO  # Python 2
                except ImportError:
                    from io import StringIO  # Python 3
                pr.disable()
                s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
myLogger.debug(s.getvalue())
```
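A minimal notebook-style sketch of calling `display`; it assumes a Jupyter/IPython session with pixiedust installed, and the DataFrame contents are arbitrary sample data.

```python
import pandas as pd

from pixiedust.display import display

# Run inside a notebook cell; pixiedust inspects the entity and picks a display handler
df = pd.DataFrame({"year": [2016, 2017, 2018], "sales": [10, 14, 9]})
display(df)
```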
#### File: display/streaming/__init__.py
```python
from abc import abstractmethod, ABCMeta
from six import with_metaclass
class StreamingDataAdapter(with_metaclass(ABCMeta)):
def __init__(self):
self.channels = []
def getNextData(self):
nextData = self.doGetNextData()
if nextData is not None and hasattr(self, "channels"):
for channel in self.channels:
channel.processNextData(nextData)
return nextData
@abstractmethod
def doGetNextData(self):
"""Return the next batch of data from the underlying stream.
Accepted return values are:
1. (x,y): tuple of list/numpy arrays representing the x and y axis
2. pandas dataframe
3. y: list/numpy array representing the y axis. In this case, the x axis is automatically created
4. pandas serie: similar to #3
"""
pass
def getStreamingChannel(self, processfn, initialData = None):
channel = StreamingChannel(processfn, initialData)
if not hasattr(self, "channels"):
self.channels = []
self.channels.append(channel)
return channel
class StreamingChannel(StreamingDataAdapter):
def __init__(self, processfn, initialData):
super(StreamingChannel,self).__init__()
self.processfn = processfn
self.processedData = None
self.accumulator = initialData
def processNextData(self, nextData):
newProcessedData, self.accumulator = self.processfn(self.accumulator, nextData)
#merge
self.processedData = newProcessedData if self.processedData is None else self.processedData + newProcessedData
def doGetNextData(self):
nextData = self.processedData
self.processedData = None
return nextData
``` |
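An illustrative subclass of `StreamingDataAdapter`, following return-value option 3 from the `doGetNextData` docstring (a plain list for the y axis). The import path assumes the module lives at `pixiedust.display.streaming`, and the random feed is purely a stand-in for a real stream.

```python
import random

from pixiedust.display.streaming import StreamingDataAdapter


class RandomStreamingAdapter(StreamingDataAdapter):
    """Toy adapter that emits a small batch of random y values per poll."""
    def doGetNextData(self):
        return [random.uniform(0, 100) for _ in range(5)]


adapter = RandomStreamingAdapter()
# processfn(accumulator, nextData) must return (newProcessedData, accumulator)
channel = adapter.getStreamingChannel(lambda acc, data: (data, acc))

adapter.getNextData()         # pulls a batch and pushes it through the channel
print(channel.getNextData())  # drains the processed batch from the channel
```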
{
"source": "JordanGibson/bolt-python",
"score": 2
} |
#### File: adapter/falcon/resource.py
```python
from datetime import datetime # type: ignore
from http import HTTPStatus
from falcon import Request, Response
from slack_bolt import BoltResponse
from slack_bolt.app import App
from slack_bolt.oauth import OAuthFlow
from slack_bolt.request import BoltRequest
class SlackAppResource:
"""
from slack_bolt import App
app = App()
import falcon
api = application = falcon.API()
api.add_route("/slack/events", SlackAppResource(app))
"""
def __init__(self, app: App): # type: ignore
self.app = app
def on_get(self, req: Request, resp: Response):
if self.app.oauth_flow is not None:
oauth_flow: OAuthFlow = self.app.oauth_flow
if req.path == oauth_flow.install_path:
bolt_resp = oauth_flow.handle_installation(self._to_bolt_request(req))
self._write_response(bolt_resp, resp)
return
elif req.path == oauth_flow.redirect_uri_path:
bolt_resp = oauth_flow.handle_callback(self._to_bolt_request(req))
self._write_response(bolt_resp, resp)
return
resp.status = "404"
resp.body = "The page is not found..."
def on_post(self, req: Request, resp: Response):
bolt_req = self._to_bolt_request(req)
bolt_resp = self.app.dispatch(bolt_req)
self._write_response(bolt_resp, resp)
def _to_bolt_request(self, req: Request) -> BoltRequest:
return BoltRequest(
body=req.stream.read(req.content_length or 0).decode("utf-8"),
query=req.query_string,
headers={k.lower(): v for k, v in req.headers.items()},
)
def _write_response(self, bolt_resp: BoltResponse, resp: Response):
resp.body = bolt_resp.body
status = HTTPStatus(bolt_resp.status)
resp.status = str(f"{status.value} {status.phrase}")
resp.set_headers(bolt_resp.first_headers_without_set_cookie())
for cookie in bolt_resp.cookies():
for name, c in cookie.items():
expire_value = c.get("expires")
expire = (
datetime.strptime(expire_value, "%a, %d %b %Y %H:%M:%S %Z")
if expire_value
else None
)
resp.set_cookie(
name=name,
value=c.value,
expires=expire,
max_age=c.get("max-age"),
domain=c.get("domain"),
path=c.get("path"),
secure=True,
http_only=True,
)
``` |
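A sketch of serving the resource locally, following the wiring in the class docstring; the import path is assumed to be `slack_bolt.adapter.falcon`, the bot token and signing secret are read from the usual environment variables, and the standard-library WSGI server stands in for a real one such as gunicorn.

```python
import falcon
from wsgiref.simple_server import make_server

from slack_bolt import App
from slack_bolt.adapter.falcon import SlackAppResource

app = App()  # expects SLACK_BOT_TOKEN / SLACK_SIGNING_SECRET in the environment

api = falcon.API()
api.add_route("/slack/events", SlackAppResource(app))

if __name__ == "__main__":
    with make_server("", 3000, api) as server:
        server.serve_forever()
```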
{
"source": "Jordan-Gillard/Versionizer",
"score": 2
} |
#### File: Versionizer/tests/test_ast_differ.py
```python
import os
import pytest
from tests.shared_functions import generate_temp_file_with_content
from versionizer.ast_differ import ASTDiffer
from versionizer.ast_handler import ASTHandler
@pytest.fixture
def temp_file1():
func = "def foo():\n return 1"
for filename in generate_temp_file_with_content(func):
yield filename
@pytest.fixture
def temp_file2():
func = "def foo():\n return 2"
for filename in generate_temp_file_with_content(func):
yield filename
def test_temp_file_fixtures(temp_file1):
assert os.path.getsize(temp_file1) > 0
def test_get_changed_function_nodes_returns_nothing_for_no_change(temp_file1):
handler1 = ASTHandler(temp_file1)
handler2 = ASTHandler(temp_file1)
differ = ASTDiffer(handler1, handler2)
assert not differ.get_changed_function_nodes()
def test_get_changed_function_nodes_returns_node_that_was_changed(temp_file1,
temp_file2):
handler1 = ASTHandler(temp_file1)
handler2 = ASTHandler(temp_file2)
differ = ASTDiffer(handler1, handler2)
diff_nodes = differ.get_changed_function_nodes()
assert len(diff_nodes) == 1
different_node = diff_nodes.pop()
assert different_node.name == "foo"
```
#### File: Versionizer/tests/test_cli.py
```python
import io
from contextlib import redirect_stderr
import pytest
from versionizer.cli import parser
@pytest.fixture()
def default_args():
return ["--previous-commit", "fake_commit_12345", "--project-path", "sample_files/"]
def test_parser_defaults(default_args):
args = parser.parse_args(default_args)
assert args.generate_tests
assert args.run_tests
assert args.algorithm == "WHOLE_SUITE"
def test_parser_dont_generate_tests(default_args):
default_args.append('-dgt')
args = parser.parse_args(default_args)
assert args.generate_tests is False
def test_parser_dont_run_tests(default_args):
default_args.append('-dr')
args = parser.parse_args(default_args)
assert args.run_tests is False
def test_parser_dont_generate_tests_and_run_tests(default_args):
default_args.extend(['-dgt', '-r'])
args = parser.parse_args(default_args)
assert args.generate_tests is False
assert args.run_tests
def test_parser_generate_tests_and_dont_run_tests(default_args):
default_args.extend(['-gt', '-dr'])
args = parser.parse_args(default_args)
assert args.generate_tests
assert args.run_tests is False
def test_parser_throws_error_with_bad_algorithm(default_args):
default_args.extend(["--algorithm", "FAKE_ALGORITHM"])
# We don't want the error showing up in the test results
with redirect_stderr(io.StringIO()):
with pytest.raises(SystemExit):
parser.parse_args(default_args)
```
#### File: Versionizer/versionizer/utils.py
```python
import contextlib
import io
import sys
from asciistuff import Banner, Lolcat
from colorama import Fore, Style
@contextlib.contextmanager
def no_stdout():
    save_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        yield
    finally:
        sys.stdout = save_stdout
def print_banner():
print(Lolcat(Banner("Versionizer", font='bubble'), spread=0.5))
def print_bright_blue(message):
print(Style.BRIGHT + Fore.BLUE + message)
print(Style.RESET_ALL)
```
#### File: Versionizer/versionizer/versionizer.py
```python
import logging
import os
from typing import Optional, Set
from versionizer.ast_differ import ASTDiffer
from versionizer.ast_handler import ASTHandler
from versionizer.automated_test_executor import AutomatedTestExecutor
from versionizer.automated_test_generator import AutomatedTestGenerator
from versionizer.function_node import FunctionNode
from versionizer.git_handler import GitHandler
from versionizer.parsed_ast_builder import ParsedASTBuilder
class Versionizer:
def __init__(self, project_path: str,
first_commit: str,
output_path: Optional[str] = None,
second_commit: Optional[str] = None,
module: str = "",
algorithm="WHOLE_SUITE",
generate_tests=True,
run_tests=True):
self.project_path = project_path
self.module = module
if not output_path:
self.output_path = project_path
else:
self.output_path = output_path
self.first_commit = first_commit
self.second_commit = second_commit
self._validate_algorithm(algorithm)
self.algorithm = algorithm
self.generate_tests = generate_tests
self.run_tests = run_tests
self.test_generator = AutomatedTestGenerator(project_path, output_path,
algorithm, module)
self.git_handler = GitHandler(self.first_commit, self.second_commit)
@staticmethod
def _validate_algorithm(algorithm):
# TODO: Should algorithm validation be done in the AutomatedTestGenerator?
accepted_algorithms = ["RANDOM", "MOSA", "RANDOM_SEARCH", "WHOLE_SUITE"]
if algorithm not in accepted_algorithms:
raise ValueError(f"Algorithms must be one of {', '.join(algorithm)}.")
def run(self):
self.git_handler.stash_changes_if_necessary()
try:
# Handle working with a single file
if self.module:
self._run_for_file(self.project_path, self.module)
# Handle working with an entire directory
else:
for dirpath, dirnames, filenames in os.walk(self.project_path):
for file in filenames:
if file.endswith(
".py") and "test" not in file and "init" not in file:
self._run_for_file(self.project_path, file)
except Exception as e:
logging.error(e)
finally:
self.git_handler.return_to_head()
self.git_handler.pop_stash_if_needed()
if self.run_tests:
AutomatedTestExecutor.run_tests(self.project_path)
def _run_for_file(self, project_path, file):
self.git_handler.checkout_first_commit()
file_path_to_test = os.path.join(project_path, file)
ast_handler_1 = ASTHandler(file_path_to_test)
self.git_handler.checkout_second_commit()
ast_handler_2 = ASTHandler(file_path_to_test)
ast_differ = ASTDiffer(ast_handler_1, ast_handler_2)
different_nodes: Set[FunctionNode] = ast_differ.get_changed_function_nodes()
self.git_handler.checkout_first_commit()
parsed_ast_builder: ParsedASTBuilder = ParsedASTBuilder(file_path_to_test,
different_nodes,
ast_handler_1.get_function_dependents())
parsed_ast_builder.build_source()
if self.generate_tests:
self.test_generator.generate_tests()
test_file_name = "test_" + file
test_file_path = os.path.join(project_path, test_file_name)
with open(test_file_path, "r+") as f:
test_file_lines = f.readlines()
self.git_handler.return_to_head()
with open(test_file_path, "w") as f:
f.writelines(test_file_lines)
``` |
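A sketch of driving the class directly rather than through the CLI; the commit hash and project path are placeholders mirroring the flags exercised in the tests above.

```python
from versionizer.versionizer import Versionizer

# Placeholder commit hash and path -- substitute values from your own repository
versionizer = Versionizer(
    project_path="sample_files/",
    first_commit="abc1234",
    algorithm="WHOLE_SUITE",
    generate_tests=True,
    run_tests=True,
)
versionizer.run()
```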