max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 7
115
| max_stars_count
int64 101
368k
| id
stringlengths 2
8
| content
stringlengths 6
1.03M
|
---|---|---|---|---|
sky/crawler/crawl.py
|
amititash/sky
| 325 |
142884
|
#!/usr/bin/env python3
"""A simple web crawler -- main driver program."""
import asyncio
import logging
import sys
import os
from sky.crawler.crawling import Crawler
from sky.crawler.reporting import report
def get_config(config, loop):
    """Normalize crawler config in place and attach the event loop.

    Every entry of ``config["seed_urls"]`` that lacks a URL scheme gets an
    ``http://`` prefix, and *loop* is stored under ``config["loop"]``.

    :param config: crawler configuration dict with a ``"seed_urls"`` list
    :param loop: asyncio event loop the crawler will run on
    :return: the same (mutated) ``config`` dict
    """
    # enumerate() instead of range(len(...)); assign back by index so the
    # original list object is updated in place (callers may hold a reference).
    for i, url in enumerate(config["seed_urls"]):
        if "://" not in url:
            config["seed_urls"][i] = "http://" + url
    config["loop"] = loop
    return config
def start(
    config,
    crawler_class=Crawler,
    save_data_result_fn=None,
    save_bulk_data_fn=None,
    logging_level=2,
    cache=None,
):
    """Main program.

    Parse arguments, set up event loop, run crawler, print report.

    :param config: crawler configuration dict (see :func:`get_config`)
    :param crawler_class: crawler implementation to instantiate
    :param save_data_result_fn: optional override for ``crawler.save_data``
    :param save_bulk_data_fn: optional override for ``crawler.save_bulk_data``
    :param logging_level: index into [ERROR, WARN, INFO, DEBUG]; clamped to DEBUG
    :param cache: optional cache object handed to the crawler
    """
    logging_levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    # Clamp so out-of-range values fall back to the most verbose level.
    logging.basicConfig(level=logging_levels[min(logging_level, len(logging_levels) - 1)])
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    conf = get_config(config, loop)
    crawler = crawler_class(conf, cache)
    if save_data_result_fn is not None:
        crawler.save_data = save_data_result_fn
    if save_bulk_data_fn is not None:
        crawler.save_bulk_data = save_bulk_data_fn
    if crawler.login_url:
        loop.run_until_complete(crawler.login())
    try:
        loop.run_until_complete(crawler.crawl())  # Crawler gonna crawl.
    except KeyboardInterrupt:
        sys.stderr.flush()
        print("\nInterrupted\n")
    except Exception as e:
        # BUG FIX: print() does not do logging-style %-interpolation; the old
        # call printed the raw "%r" format string and the exception side by side.
        print("CRITICAL ERROR main loop exception: %r" % (e,))
    finally:
        # Always drain pending work so the report reflects a consistent state.
        loop.run_until_complete(crawler.finish_leftovers())
    report(crawler)
|
dragonfly/opt/unittest_euclidean_multiobjective_gp_bandit.py
|
hase1128/dragonfly
| 675 |
142910
|
<gh_stars>100-1000
"""
Unit tests for multi-objective optimisation using GP Bandits.
-- <EMAIL>
"""
import unittest
from . import multiobjective_gp_bandit
from .unittest_euclidean_random_multiobjective_optimiser import \
EuclideanMultiObjectiveOptimiserBaseTestCase
from ..utils.base_test_class import BaseTestClass, execute_tests
# NOTE(review): bare @unittest.skip (no reason string) passes the class itself
# as the "reason"; older Pythons handled this inconsistently -- TODO confirm
# minimum supported version.
@unittest.skip
class EuclideanRandomMultiObjectiveOptimiserTestCase(
    EuclideanMultiObjectiveOptimiserBaseTestCase, BaseTestClass):
  """ Unit-tests for random multi-objective optimisation. """

  @classmethod
  def _child_instantiate_optimiser(cls, multi_func_caller, worker_manager, options,
                                   reporter):
    """ Instantiate optimiser. """
    # Single-fidelity (is_mf=False) GP-bandit optimiser over Euclidean domains.
    return multiobjective_gp_bandit.EuclideanMultiObjectiveGPBandit(multi_func_caller,
      worker_manager, is_mf=False, options=options, reporter=reporter)

  @classmethod
  def run_optimiser(cls, multi_func_caller, worker_manager, max_capital, mode,
                    *args, **kwargs):
    """ Runs multi-objective optimiser. """
    # Convenience driver used by the base test case; forwards extra args.
    return multiobjective_gp_bandit.multiobjective_gpb_from_multi_func_caller(
      multi_func_caller, worker_manager, max_capital, is_mf=False,
      mode=mode, *args, **kwargs)
# Allow running this test module directly.
if __name__ == '__main__':
  execute_tests()
|
vulnerabilities/migrations/0002_add_patched_package.py
|
bhuvi11/vulnerablecode
| 257 |
142929
|
# Generated by Django 3.0.13 on 2021-04-04 06:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Schema migration: track which package version patches a vulnerability.

    Adds an optional ``patched_package`` FK to ``PackageRelatedVulnerability``
    and refreshes help text / choices on existing fields. No data migration.
    """

    dependencies = [
        ("vulnerabilities", "0001_initial"),
    ]

    operations = [
        # New nullable FK: the package version that fixes the vulnerability.
        migrations.AddField(
            model_name="packagerelatedvulnerability",
            name="patched_package",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="patched_package",
                to="vulnerabilities.Package",
            ),
        ),
        # Reverse accessor for the vulnerable package becomes
        # ``vulnerable_package`` to distinguish it from the patched one.
        migrations.AlterField(
            model_name="packagerelatedvulnerability",
            name="package",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="vulnerable_package",
                to="vulnerabilities.Package",
            ),
        ),
        # Help-text update only; column definition is unchanged.
        migrations.AlterField(
            model_name="vulnerability",
            name="vulnerability_id",
            field=models.CharField(
                help_text="Unique identifier for a vulnerability: this is either a published CVE id (as in CVE-2020-7965) if it exists. Otherwise this is a VulnerableCode-assigned VULCOID (as in VULCOID-20210222-1315-16461541). When a vulnerability CVE is assigned later we replace this with the CVE and keep the 'old' VULCOID in the 'old_vulnerability_id' field to support redirection to the CVE id.",
                max_length=50,
                unique=True,
            ),
        ),
        # Extends the scoring-system choice list (CVSSv3.1 qualitative rating,
        # generic textual rating, distro-specific severities).
        migrations.AlterField(
            model_name="vulnerabilityseverity",
            name="scoring_system",
            field=models.CharField(
                choices=[
                    ("cvssv2", "CVSSv2 Base Score"),
                    ("cvssv2_vector", "CVSSv2 Vector"),
                    ("cvssv3", "CVSSv3 Base Score"),
                    ("cvssv3_vector", "CVSSv3 Vector"),
                    ("cvssv3.1", "CVSSv3.1 Base Score"),
                    ("cvssv3.1_vector", "CVSSv3.1 Vector"),
                    ("rhbs", "RedHat Bugzilla severity"),
                    ("rhas", "RedHat Aggregate severity"),
                    ("avgs", "Archlinux Vulnerability Group Severity"),
                    ("cvssv3.1_qr", "CVSSv3.1 Qualitative Severity Rating"),
                    ("generic_textual", "Generic textual severity rating"),
                ],
                help_text="identifier for the scoring system used. Available choices are: cvssv2 is vulnerability_id for CVSSv2 Base Score system, cvssv2_vector is vulnerability_id for CVSSv2 Vector system, cvssv3 is vulnerability_id for CVSSv3 Base Score system, cvssv3_vector is vulnerability_id for CVSSv3 Vector system, cvssv3.1 is vulnerability_id for CVSSv3.1 Base Score system, cvssv3.1_vector is vulnerability_id for CVSSv3.1 Vector system, rhbs is vulnerability_id for RedHat Bugzilla severity system, rhas is vulnerability_id for RedHat Aggregate severity system, avgs is vulnerability_id for Archlinux Vulnerability Group Severity system, cvssv3.1_qr is vulnerability_id for CVSSv3.1 Qualitative Severity Rating system, generic_textual is vulnerability_id for Generic textual severity rating system ",
                max_length=50,
            ),
        ),
    ]
|
Bokeh/bubble_plot.py
|
NoonienSoong/Data-Science-45min-Intros
| 1,406 |
142939
|
<filename>Bokeh/bubble_plot.py
from collections import OrderedDict
import pandas as pd
import numpy as np
from jinja2 import Template
from bokeh.embed import components
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d,
LinearAxis, HoverTool, Text,
SingleIntervalTicker, Slider, Callback
)
from bokeh.palettes import Spectral6
from bokeh.plotting import vplot, hplot
from bokeh.resources import INLINE, Resources
from bokeh.templates import RESOURCES
def _get_data():
    """Load the Gapminder CSV assets and derive plotting data.

    Returns ``(years, regions, fertility_df, life_expectancy_df,
    population_df_size, regions_df)`` where ``population_df_size`` holds
    bubble radii and ``regions_df`` gains a ``region_color`` column.

    Assumes the CSV column headers are consecutive integer years -- TODO
    confirm against the asset files.
    """
    # Get the data
    fertility_df = pd.read_csv('assets/fertility.csv', index_col='Country')
    life_expectancy_df = pd.read_csv('assets/life_expectancy.csv', index_col='Country')
    population_df = pd.read_csv('assets/population.csv', index_col='Country')
    regions_df = pd.read_csv('assets/regions.csv', index_col='Country')

    columns = list(fertility_df.columns)
    # FIX: +1 so the final year is included -- range() excludes its stop value,
    # which previously left the last column out of the rename mapping.
    years = list(range(int(columns[0]), int(columns[-1]) + 1))
    rename_dict = dict(zip(columns, years))
    fertility_df = fertility_df.rename(columns=rename_dict)
    life_expectancy_df = life_expectancy_df.rename(columns=rename_dict)
    population_df = population_df.rename(columns=rename_dict)
    regions_df = regions_df.rename(columns=rename_dict)

    # Bubble area ~ population: radius = sqrt(pop / pi), scaled down, with a
    # floor so tiny countries remain visible.
    scale_factor = 200
    population_df_size = np.sqrt(population_df / np.pi) / scale_factor
    min_size = 3
    population_df_size = population_df_size.where(population_df_size >= min_size).fillna(min_size)

    regions_df.Group = regions_df.Group.astype('category')
    regions = list(regions_df.Group.cat.categories)

    def get_color(r):
        # One palette entry per region category.
        return Spectral6[regions.index(r.Group)]
    regions_df['region_color'] = regions_df.apply(get_color, axis=1)

    return (years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df)
def _get_plot():
    """Assemble the interactive Gapminder bubble plot (plot + year slider).

    Builds one ColumnDataSource per year, wires a CustomJS-style Callback so
    the slider swaps the renderer's data source, and returns the laid-out
    vplot. Uses the legacy bokeh API (Plot/vplot/Callback).
    """
    years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df = _get_data()

    # Set-up the sources
    sources = {}
    region_color = regions_df['region_color']
    region_color.name = 'region_color'
    for year in years:
        fertility = fertility_df[year]
        fertility.name = 'fertility'
        life = life_expectancy_df[year]
        life.name = 'life'
        population = population_df_size[year]
        population.name = 'population'
        new_df = pd.concat([fertility, life, population, region_color], axis=1)
        # Key '_<year>' matches the JS lookup table built below.
        sources['_' + str(year)] = ColumnDataSource(new_df)
    # Maps each year to its source name; quotes stripped so the string is a
    # valid JS object literal referencing the Callback's args by name.
    dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
    js_source_array = str(dictionary_of_sources).replace("'", "")

    # Build the plot
    # Set up the plot
    xdr = Range1d(1, 9)
    ydr = Range1d(20, 100)
    plot = Plot(
        x_range=xdr,
        y_range=ydr,
        title="",
        plot_width=800,
        plot_height=400,
        outline_line_color=None,
        toolbar_location=None,
    )
    AXIS_FORMATS = dict(
        minor_tick_in=None,
        minor_tick_out=None,
        major_tick_in=None,
        major_label_text_font_size="10pt",
        major_label_text_font_style="normal",
        axis_label_text_font_size="10pt",
        axis_line_color='#AAAAAA',
        major_tick_line_color='#AAAAAA',
        major_label_text_color='#666666',
        major_tick_line_cap="round",
        axis_line_cap="round",
        axis_line_width=1,
        major_tick_line_width=1,
    )
    xaxis = LinearAxis(SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
    yaxis = LinearAxis(SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
    plot.add_layout(xaxis, 'below')
    plot.add_layout(yaxis, 'left')

    # Add the year in background (add before circle)
    text_source = ColumnDataSource({'year': ['%s' % years[0]]})
    text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
    plot.add_glyph(text_source, text)

    # Add the circle
    renderer_source = sources['_%s' % years[0]]
    circle_glyph = Circle(
        x='fertility', y='life', size='population',
        fill_color='region_color', fill_alpha=0.8,
        line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
    circle_renderer = plot.add_glyph(renderer_source, circle_glyph)

    # Add the hover (only against the circle and not other plot elements)
    tooltips = "@index"
    plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))

    # Hand-drawn legend: one label + swatch per region, stacked downwards.
    text_x = 7
    text_y = 95
    for i, region in enumerate(regions):
        plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
        plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
        text_y = text_y - 5

    # Add the slider
    code = """
        var year = slider.get('value'),
            sources = %s,
            new_source_data = sources[year].get('data');
        renderer_source.set('data', new_source_data);
        renderer_source.trigger('change');
        text_source.set('data', {'year': [String(year)]});
        text_source.trigger('change');
    """ % js_source_array
    callback = Callback(args=sources, code=code)
    slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback)
    # Extra args must be registered after construction so the JS can see them.
    callback.args["slider"] = slider
    callback.args["renderer_source"] = renderer_source
    callback.args["text_source"] = text_source

    # Lay it out
    return vplot(plot, hplot(slider))
def get_bubble_html(plot=None):
    """Render the bubble chart into the HTML page template.

    A pre-built ``plot`` layout may be supplied; otherwise one is created
    with :func:`_get_plot`. Returns the rendered HTML string.
    """
    layout = plot or _get_plot()
    with open('assets/bubble_template.html', 'r') as template_fp:
        template = Template(template_fp.read())
    resources = Resources(mode='server', root_url='/tree/')
    bokeh_js = RESOURCES.render(js_files=resources.js_files)
    script, div = components(layout)
    return template.render(
        title="Bokeh - Gapminder demo",
        bokeh_js=bokeh_js,
        plot_script=script,
        plot_div=div,
    )
def get_1964_data():
    """Return a single DataFrame of fertility, life expectancy, bubble size
    and region color for the year 1964 (indexed by country)."""
    year = 1964
    years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df = _get_data()
    named_series = [
        ('fertility', fertility_df[year]),
        ('life', life_expectancy_df[year]),
        ('population', population_df_size[year]),
        ('region_color', regions_df['region_color']),
    ]
    parts = []
    for name, series in named_series:
        series.name = name
        parts.append(series)
    return pd.concat(parts, axis=1)
def get_scatter_data():
    """Return an OrderedDict mapping '1964' to (fertility, life-expectancy)
    pairs, with missing values dropped per column."""
    _, _, fertility_df, life_expectancy_df, _, _ = _get_data()
    fertility_1964 = fertility_df[1964].dropna().values
    life_1964 = life_expectancy_df[1964].dropna().values
    xyvalues = OrderedDict()
    xyvalues['1964'] = list(zip(fertility_1964, life_1964))
    return xyvalues
|
Algo and DSA/LeetCode-Solutions-master/Python/number-of-lines-to-write-string.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
142944
|
# Time: O(n)
# Space: O(1)
# Time: O(n)
# Space: O(1)
class Solution(object):
    def numberOfLines(self, widths, S):
        """
        :type widths: List[int]
        :type S: str
        :rtype: List[int]
        """
        # Greedily fill 100-unit lines; a character that would overflow the
        # current line starts a new one.
        lines, used = 1, 0
        for ch in S:
            width = widths[ord(ch) - ord('a')]
            if used + width > 100:
                lines += 1
                used = width
            else:
                used += width
        return [lines, used]
|
blocks/bricks/recurrent/__init__.py
|
KIKOcaoyue/blocks
| 1,067 |
142955
|
<filename>blocks/bricks/recurrent/__init__.py<gh_stars>1000+
# Re-export the public recurrent-brick API at package level.
from .base import BaseRecurrent, recurrent
from .architectures import SimpleRecurrent, LSTM, GatedRecurrent
from .misc import Bidirectional, RecurrentStack, RECURRENTSTACK_SEPARATOR

# Names exported by `from blocks.bricks.recurrent import *`.
__all__ = ("BaseRecurrent", "recurrent", "SimpleRecurrent", "LSTM",
           "GatedRecurrent", "Bidirectional", "RecurrentStack",
           "RECURRENTSTACK_SEPARATOR")
|
tests/admin/clush-tests/TaskDistantPdshMixin.py
|
utdsimmons/ohpc
| 692 |
142956
|
<reponame>utdsimmons/ohpc
#!/usr/bin/env python
# ClusterShell (distant, pdsh worker) test suite
# Written by <NAME>
"""Unit test for ClusterShell Task (distant, pdsh worker)"""
import copy
import shutil
import sys
sys.path.insert(0, '../lib')
from TLib import HOSTNAME, make_temp_filename, make_temp_dir
from ClusterShell.Event import EventHandler
from ClusterShell.NodeSet import NodeSet
from ClusterShell.Task import *
from ClusterShell.Worker.Worker import WorkerBadArgumentError
from ClusterShell.Worker.Pdsh import WorkerPdsh
from ClusterShell.Worker.EngineClient import *
import socket
# TEventHandlerChecker 'received event' flags
EV_START = 0x01
EV_PICKUP = 0x02
EV_READ = 0x04
EV_WRITTEN = 0x08
EV_HUP = 0x10
EV_TIMEOUT = 0x20
EV_CLOSE = 0x40
class TaskDistantPdshMixin(object):
    """Test mixin exercising ClusterShell's distant pdsh/pdcp workers.

    Runs commands and file copies against HOSTNAME through WorkerPdsh and
    verifies buffers, return codes and event-handler callbacks.

    FIX: the deprecated ``self.assert_`` alias (removed in Python 3.12) is
    replaced by ``assertTrue`` throughout; no behavioral change.
    NOTE(review): ``os`` is assumed to be in scope via the
    ``from ClusterShell.Task import *`` wildcard import -- TODO confirm.
    """

    def setUp(self):
        # Every test schedules workers on the current task.
        self._task = task_self()

    def testWorkerPdshGetCommand(self):
        # test worker.command with WorkerPdsh
        worker1 = WorkerPdsh(HOSTNAME, command="/bin/echo foo bar fuu",
                             handler=None, timeout=5)
        self._task.schedule(worker1)
        worker2 = WorkerPdsh(HOSTNAME, command="/bin/echo blah blah foo",
                             handler=None, timeout=5)
        self._task.schedule(worker2)
        # run task
        self._task.resume()
        # test output
        self.assertEqual(worker1.node_buffer(HOSTNAME), "foo bar fuu")
        self.assertEqual(worker1.command, "/bin/echo foo bar fuu")
        self.assertEqual(worker2.node_buffer(HOSTNAME), "blah blah foo")
        self.assertEqual(worker2.command, "/bin/echo blah blah foo")

    def testLocalhostExplicitPdshCopy(self):
        # test simple localhost copy with explicit pdsh worker
        dest = make_temp_filename(suffix='LocalhostExplicitPdshCopy')
        try:
            worker = WorkerPdsh(HOSTNAME, source="/etc/hosts",
                                dest=dest, handler=None, timeout=10)
            self._task.schedule(worker)
            self._task.resume()
            self.assertEqual(worker.source, "/etc/hosts")
            self.assertEqual(worker.dest, dest)
        finally:
            os.unlink(dest)

    def testLocalhostExplicitPdshCopyWithOptions(self):
        dest = make_temp_dir('testLocalhostExplicitPdshCopyWithOptions')
        self._task.set_info("pdcp_path", "pdcp -p")
        try:
            worker = WorkerPdsh(HOSTNAME, source="/etc/hosts", dest=dest,
                                handler=None)
            self._task.schedule(worker)
            self._task.resume()
            self.assertEqual(self._task.max_retcode(), 0)
            self.assertTrue(os.path.exists(os.path.join(dest, "hosts")))
        finally:
            os.unlink(os.path.join(dest, "hosts"))
            os.rmdir(dest)
        # clear options after test
        task_cleanup()
        self.assertEqual(task_self().info("pdcp_path"), None)

    def testLocalhostExplicitPdshCopyDir(self):
        # test simple localhost copy dir with explicit pdsh worker
        dtmp_src = make_temp_dir('src')
        # pdcp worker doesn't create custom destination directory
        dtmp_dst = make_temp_dir('testLocalhostExplicitPdshCopyDir')
        try:
            os.mkdir(os.path.join(dtmp_src, "lev1_a"))
            os.mkdir(os.path.join(dtmp_src, "lev1_b"))
            os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2"))
            worker = WorkerPdsh(HOSTNAME, source=dtmp_src,
                                dest=dtmp_dst, handler=None, timeout=10)
            self._task.schedule(worker)
            self._task.resume()
            self.assertTrue(os.path.exists(os.path.join(dtmp_dst, \
                os.path.basename(dtmp_src), "lev1_a", "lev2")))
        finally:
            shutil.rmtree(dtmp_dst, ignore_errors=True)
            shutil.rmtree(dtmp_src, ignore_errors=True)

    def testLocalhostExplicitPdshCopyDirPreserve(self):
        # test simple localhost preserve copy dir with explicit pdsh worker
        dtmp_src = make_temp_dir('src')
        # pdcp worker doesn't create custom destination directory
        dtmp_dst = make_temp_dir('testLocalhostExplicitPdshCopyDirPreserve')
        try:
            os.mkdir(os.path.join(dtmp_src, "lev1_a"))
            os.mkdir(os.path.join(dtmp_src, "lev1_b"))
            os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2"))
            worker = WorkerPdsh(HOSTNAME, source=dtmp_src,
                                dest=dtmp_dst, handler=None, timeout=10, preserve=True)
            self._task.schedule(worker)
            self._task.resume()
            # FIX: assert_ -> assertTrue (deprecated alias)
            self.assertTrue(os.path.exists(os.path.join(dtmp_dst, \
                os.path.basename(dtmp_src), "lev1_a", "lev2")))
        finally:
            shutil.rmtree(dtmp_dst, ignore_errors=True)
            shutil.rmtree(dtmp_src, ignore_errors=True)

    def testExplicitPdshWorker(self):
        # test simple localhost command with explicit pdsh worker
        # init worker
        worker = WorkerPdsh(HOSTNAME, command="echo alright", handler=None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test output
        self.assertEqual(worker.node_buffer(HOSTNAME), "alright")

    def testExplicitPdshWorkerWithOptions(self):
        self._task.set_info("pdsh_path", "/usr/bin/pdsh -S")
        worker = WorkerPdsh(HOSTNAME, command="echo alright", handler=None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test output
        self.assertEqual(worker.node_buffer(HOSTNAME), "alright")
        # clear options after test
        task_cleanup()
        self.assertEqual(task_self().info("pdsh_path"), None)

    def testExplicitPdshWorkerStdErr(self):
        # test simple localhost command with explicit pdsh worker (stderr)
        worker = WorkerPdsh(HOSTNAME, command="echo alright 1>&2",
                            handler=None, stderr=True)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test output
        self.assertEqual(worker.node_error_buffer(HOSTNAME), "alright")
        # Re-test with stderr=False
        worker = WorkerPdsh(HOSTNAME, command="echo alright 1>&2",
                            handler=None, stderr=False)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test output
        self.assertEqual(worker.node_error_buffer(HOSTNAME), None)

    def testPdshWorkerWriteNotSupported(self):
        # test that write is reported as not supported with pdsh
        worker = WorkerPdsh(HOSTNAME, command="uname -r", handler=None,
                            timeout=5)
        self.assertRaises(EngineClientNotSupportedError, worker.write, "toto")

    class TEventHandlerChecker(EventHandler):
        """simple event trigger validator"""
        def __init__(self, test):
            self.test = test
            self.flags = 0
            self.read_count = 0
            self.written_count = 0
        def ev_start(self, worker):
            self.test.assertEqual(self.flags, 0)
            self.flags |= EV_START
        def ev_pickup(self, worker):
            self.test.assertTrue(self.flags & EV_START)
            self.flags |= EV_PICKUP
            self.last_node = worker.current_node
        def ev_read(self, worker):
            self.test.assertEqual(self.flags, EV_START | EV_PICKUP)
            self.flags |= EV_READ
            self.last_node = worker.current_node
            self.last_read = worker.current_msg
        def ev_written(self, worker):
            # NOTE(review): checks *any* of START/PICKUP is set, not both --
            # TODO confirm this is intentional.
            self.test.assertTrue(self.flags & (EV_START | EV_PICKUP))
            self.flags |= EV_WRITTEN
        def ev_hup(self, worker):
            self.test.assertTrue(self.flags & (EV_START | EV_PICKUP))
            self.flags |= EV_HUP
            self.last_node = worker.current_node
            self.last_rc = worker.current_rc
        def ev_timeout(self, worker):
            self.test.assertTrue(self.flags & EV_START)
            self.flags |= EV_TIMEOUT
            self.last_node = worker.current_node
        def ev_close(self, worker):
            self.test.assertTrue(self.flags & EV_START)
            self.test.assertTrue(self.flags & EV_CLOSE == 0)
            self.flags |= EV_CLOSE

    def testExplicitWorkerPdshShellEvents(self):
        # test triggered events with explicit pdsh worker
        test_eh = self.__class__.TEventHandlerChecker(self)
        worker = WorkerPdsh(HOSTNAME, command="hostname", handler=test_eh, timeout=None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test events received: start, read, hup, close
        self.assertEqual(test_eh.flags, EV_START | EV_PICKUP | EV_READ | EV_HUP | EV_CLOSE)

    def testExplicitWorkerPdshShellEventsWithTimeout(self):
        # test triggered events (with timeout) with explicit pdsh worker
        test_eh = self.__class__.TEventHandlerChecker(self)
        worker = WorkerPdsh(HOSTNAME, command="echo alright && sleep 10",
                            handler=test_eh, timeout=2)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test events received: start, read, timeout, close
        self.assertEqual(test_eh.flags, EV_START | EV_PICKUP | EV_READ | EV_TIMEOUT | EV_CLOSE)
        self.assertEqual(worker.node_buffer(HOSTNAME), "alright")

    def testShellPdshEventsNoReadNoTimeout(self):
        # test triggered events (no read, no timeout) with explicit pdsh worker
        test_eh = self.__class__.TEventHandlerChecker(self)
        worker = WorkerPdsh(HOSTNAME, command="sleep 2",
                            handler=test_eh, timeout=None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test events received: start, close
        self.assertEqual(test_eh.flags, EV_START | EV_PICKUP | EV_HUP | EV_CLOSE)
        self.assertEqual(worker.node_buffer(HOSTNAME), None)

    def testWorkerPdshBuffers(self):
        # test buffers at pdsh worker level
        worker = WorkerPdsh(HOSTNAME, command="printf 'foo\nbar\nxxx\n'",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        self._task.resume()
        cnt = 2
        for buf, nodes in worker.iter_buffers():
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(len(nodes), 1)
                self.assertEqual(str(nodes), HOSTNAME)
        self.assertEqual(cnt, 1)
        # new check in 1.7 to ensure match_keys is not a string
        testgen = worker.iter_buffers(HOSTNAME)
        # cast to list to effectively iterate
        self.assertRaises(TypeError, list, testgen)
        # and also fixed an issue when match_keys was an empty list
        for buf, nodes in worker.iter_buffers([]):
            self.assertFalse("Found buffer with empty match_keys?!")
        for buf, nodes in worker.iter_buffers([HOSTNAME]):
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(len(nodes), 1)
                self.assertEqual(str(nodes), HOSTNAME)
        self.assertEqual(cnt, 0)

    def testWorkerPdshNodeBuffers(self):
        # test iter_node_buffers on distant pdsh workers
        worker = WorkerPdsh(HOSTNAME, command="/usr/bin/printf 'foo\nbar\nxxx\n'",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        self._task.resume()
        cnt = 1
        for node, buf in worker.iter_node_buffers():
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(node, HOSTNAME)
        self.assertEqual(cnt, 0)

    def testWorkerPdshNodeErrors(self):
        # test iter_node_errors on distant pdsh workers
        worker = WorkerPdsh(HOSTNAME, command="/usr/bin/printf 'foo\nbar\nxxx\n' 1>&2",
                            handler=None, timeout=None, stderr=True)
        self._task.schedule(worker)
        self._task.resume()
        cnt = 1
        for node, buf in worker.iter_node_errors():
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(node, HOSTNAME)
        self.assertEqual(cnt, 0)

    def testWorkerPdshRetcodes(self):
        # test retcodes on distant pdsh workers
        worker = WorkerPdsh(HOSTNAME, command="/bin/sh -c 'exit 3'",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        self._task.resume()
        cnt = 2
        for rc, keys in worker.iter_retcodes():
            cnt -= 1
            self.assertEqual(rc, 3)
            self.assertEqual(len(keys), 1)
            # FIX: assert_ -> assertTrue (deprecated alias)
            self.assertTrue(keys[0] == HOSTNAME)
        self.assertEqual(cnt, 1)
        for rc, keys in worker.iter_retcodes(HOSTNAME):
            cnt -= 1
            self.assertEqual(rc, 3)
            self.assertEqual(len(keys), 1)
            self.assertTrue(keys[0] == HOSTNAME)
        self.assertEqual(cnt, 0)
        # test node_retcode
        self.assertEqual(worker.node_retcode(HOSTNAME), 3)   # 1.2.91+
        self.assertEqual(worker.node_rc(HOSTNAME), 3)
        # test node_retcode failure
        self.assertRaises(KeyError, worker.node_retcode, "dummy")
        # test max retcode API
        self.assertEqual(self._task.max_retcode(), 3)

    def testWorkerNodeRetcodes(self):
        # test iter_node_retcodes on distant pdsh workers
        worker = WorkerPdsh(HOSTNAME, command="/bin/sh -c 'exit 3'",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        self._task.resume()
        cnt = 1
        for node, rc in worker.iter_node_retcodes():
            cnt -= 1
            self.assertEqual(rc, 3)
            self.assertEqual(node, HOSTNAME)
        self.assertEqual(cnt, 0)

    def testEscapePdsh(self):
        # test distant worker (pdsh) cmd with escaped variable
        worker = WorkerPdsh(HOSTNAME, command="export CSTEST=foobar; /bin/echo \$CSTEST | sed 's/\ foo/bar/'",
                            handler=None, timeout=None)
        #task.set_info("debug", True)
        self._task.schedule(worker)
        # execute
        self._task.resume()
        # read result
        self.assertEqual(worker.node_buffer(HOSTNAME), "$CSTEST")

    def testEscapePdsh2(self):
        # test distant worker (pdsh) cmd with non-escaped variable
        worker = WorkerPdsh(HOSTNAME, command="export CSTEST=foobar; /bin/echo $CSTEST | sed 's/\ foo/bar/'",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        # execute
        self._task.resume()
        # read result
        self.assertEqual(worker.node_buffer(HOSTNAME), "foobar")

    def testShellPdshStderrWithHandler(self):
        # test reading stderr of distant pdsh worker on event handler
        class StdErrHandler(EventHandler):
            def ev_error(self, worker):
                assert worker.last_error() == "something wrong"
        worker = WorkerPdsh(HOSTNAME, command="echo something wrong 1>&2",
                            handler=StdErrHandler(), timeout=None)
        self._task.schedule(worker)
        self._task.resume()
        for buf, nodes in worker.iter_errors():
            self.assertEqual(buf, "something wrong")
        for buf, nodes in worker.iter_errors([HOSTNAME]):
            self.assertEqual(buf, "something wrong")

    def testCommandTimeoutOption(self):
        # test pdsh shell with command_timeout set
        command_timeout_orig = self._task.info("command_timeout")
        self._task.set_info("command_timeout", 1)
        worker = WorkerPdsh(HOSTNAME, command="sleep 10",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        # FIX: assert_(worker != None) -> assertTrue with identity test
        self.assertTrue(worker is not None)
        self._task.resume()
        # restore original command_timeout (0)
        self.assertEqual(command_timeout_orig, 0)
        self._task.set_info("command_timeout", command_timeout_orig)

    def testPdshBadArgumentOption(self):
        # test WorkerPdsh constructor bad argument
        # Check code < 1.4 compatibility
        self.assertRaises(WorkerBadArgumentError, WorkerPdsh, HOSTNAME,
                          None, None)
        # As of 1.4, ValueError is raised for missing parameter
        self.assertRaises(ValueError, WorkerPdsh, HOSTNAME, None, None) # 1.4+

    def testCopyEvents(self):
        test_eh = self.__class__.TEventHandlerChecker(self)
        dest = "/tmp/cs-test_testLocalhostPdshCopyEvents"
        try:
            worker = WorkerPdsh(HOSTNAME, source="/etc/hosts",
                                dest=dest, handler=test_eh, timeout=10)
            self._task.schedule(worker)
            self._task.resume()
            self.assertEqual(test_eh.flags, EV_START | EV_PICKUP | EV_HUP | EV_CLOSE)
        finally:
            os.remove(dest)

    def testWorkerAbort(self):
        # test WorkerPdsh abort() on timer
        class AbortOnTimer(EventHandler):
            def __init__(self, worker):
                EventHandler.__init__(self)
                self.ext_worker = worker
                self.testtimer = False
            def ev_timer(self, timer):
                self.ext_worker.abort()
                self.testtimer = True
        worker = WorkerPdsh(HOSTNAME, command="sleep 10",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        aot = AbortOnTimer(worker)
        self.assertEqual(aot.testtimer, False)
        self._task.timer(2.0, handler=aot)
        self._task.resume()
        self.assertEqual(aot.testtimer, True)

    def testWorkerAbortSanity(self):
        # test WorkerPdsh abort() (sanity)
        # test noop abort() on unscheduled worker
        worker = WorkerPdsh(HOSTNAME, command="sleep 1", handler=None,
                            timeout=None)
        worker.abort()

    def testLocalhostExplicitPdshReverseCopy(self):
        # test simple localhost rcopy with explicit pdsh worker
        dest = "/tmp/cs-test_testLocalhostExplicitPdshRCopy"
        shutil.rmtree(dest, ignore_errors=True)
        try:
            os.mkdir(dest)
            worker = WorkerPdsh(HOSTNAME, source="/etc/hosts",
                                dest=dest, handler=None, timeout=10, reverse=True)
            self._task.schedule(worker)
            self._task.resume()
            self.assertEqual(worker.source, "/etc/hosts")
            self.assertEqual(worker.dest, dest)
            # FIX: assert_ -> assertTrue (deprecated alias)
            self.assertTrue(os.path.exists(os.path.join(dest, "hosts.%s" % HOSTNAME)))
        finally:
            shutil.rmtree(dest, ignore_errors=True)

    def testLocalhostExplicitPdshReverseCopyDir(self):
        # test simple localhost rcopy dir with explicit pdsh worker
        dtmp_src = make_temp_dir('src')
        dtmp_dst = make_temp_dir('testLocalhostExplicitPdshReverseCopyDir')
        try:
            os.mkdir(os.path.join(dtmp_src, "lev1_a"))
            os.mkdir(os.path.join(dtmp_src, "lev1_b"))
            os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2"))
            worker = WorkerPdsh(HOSTNAME, source=dtmp_src,
                                dest=dtmp_dst, handler=None, timeout=30, reverse=True)
            self._task.schedule(worker)
            self._task.resume()
            self.assertTrue(os.path.exists(os.path.join(dtmp_dst, \
                "%s.%s" % (os.path.basename(dtmp_src), HOSTNAME), "lev1_a", "lev2")))
        finally:
            shutil.rmtree(dtmp_dst, ignore_errors=True)
            shutil.rmtree(dtmp_src, ignore_errors=True)

    def testLocalhostExplicitPdshReverseCopyDirPreserve(self):
        # test simple localhost preserve rcopy dir with explicit pdsh worker
        dtmp_src = make_temp_dir('src')
        dtmp_dst = make_temp_dir('testLocalhostExplicitPdshReverseCopyDirPreserve')
        try:
            os.mkdir(os.path.join(dtmp_src, "lev1_a"))
            os.mkdir(os.path.join(dtmp_src, "lev1_b"))
            os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2"))
            worker = WorkerPdsh(HOSTNAME, source=dtmp_src,
                                dest=dtmp_dst, handler=None, timeout=30, preserve=True,
                                reverse=True)
            self._task.schedule(worker)
            self._task.resume()
            self.assertTrue(os.path.exists(os.path.join(dtmp_dst, \
                "%s.%s" % (os.path.basename(dtmp_src), HOSTNAME), "lev1_a", "lev2")))
        finally:
            shutil.rmtree(dtmp_dst, ignore_errors=True)
            shutil.rmtree(dtmp_src, ignore_errors=True)

    class TEventHandlerEvCountChecker(EventHandler):
        """simple event count validator"""
        def __init__(self):
            self.start_count = 0
            self.pickup_count = 0
            self.hup_count = 0
            self.close_count = 0
        def ev_start(self, worker):
            self.start_count += 1
        def ev_pickup(self, worker):
            self.pickup_count += 1
        def ev_hup(self, worker):
            self.hup_count += 1
        def ev_close(self, worker):
            self.close_count += 1

    def testWorkerEventCount(self):
        test_eh = self.__class__.TEventHandlerEvCountChecker()
        nodes = "localhost,%s" % HOSTNAME
        worker = WorkerPdsh(nodes, command="/bin/hostname", handler=test_eh)
        self._task.schedule(worker)
        self._task.resume()
        # test event count
        self.assertEqual(test_eh.pickup_count, 2)
        self.assertEqual(test_eh.hup_count, 2)
        self.assertEqual(test_eh.start_count, 1)
        self.assertEqual(test_eh.close_count, 1)
|
setup.py
|
VivumLab/py_cui
| 654 |
142985
|
import setuptools
# NOTE(review): `platform` is unused in this script -- TODO confirm before removing.
from sys import platform

# Use README for long description
with open('README.md', 'r') as readme_fp:
    long_description = readme_fp.read()

# Runtime dependencies come straight from the requirements file.
with open('requirements.txt', 'r') as req_fp:
    required_libs = req_fp.readlines()

# py_cui setup
setuptools.setup(
    name='py_cui',
    description='A widget and grid based framework for building command line user interfaces in python.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    version='0.1.4',
    author='<NAME>',
    author_email='<EMAIL>',
    license='BSD (3-clause)',
    packages=setuptools.find_packages(exclude=['docs','tests', 'examples', 'venv']),
    install_requires=required_libs,
    url='https://github.com/jwlodek/py_cui',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        # FIX: removed the Python 3.2-3.5 classifiers -- they contradicted
        # python_requires='>=3.6' below.
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='cui cli commandline user-interface ui',
    python_requires='>=3.6',
)
|
test/integ/test_concurrent_insert.py
|
jurecuhalev/snowflake-connector-python
| 311 |
143007
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
from concurrent.futures.thread import ThreadPoolExecutor
from logging import getLogger
import pytest
import snowflake.connector
from snowflake.connector.errors import ProgrammingError
try:
from parameters import CONNECTION_PARAMETERS_ADMIN
except Exception:
CONNECTION_PARAMETERS_ADMIN = {}
logger = getLogger(__name__)
def _concurrent_insert(meta):
    """Concurrent insert method.

    Opens its own connection (thread worker), inserts one row described by
    *meta* and records the outcome in ``meta["success"]``.

    :param meta: dict with connection parameters plus "warehouse", "table"
        and "idx" (row id as a string)
    :return: the same *meta* dict, with "success" set
    """
    cnx = snowflake.connector.connect(
        user=meta["user"],
        password=meta["password"],
        host=meta["host"],
        port=meta["port"],
        account=meta["account"],
        database=meta["database"],
        schema=meta["schema"],
        timezone="UTC",
        protocol="http",
    )
    # BUG FIX: bind `table` before the try block -- it is referenced in the
    # except handler, which previously raised NameError (masking the real
    # error) if the "use warehouse" statement failed.
    table = meta["table"]
    try:
        cnx.cursor().execute("use warehouse {}".format(meta["warehouse"]))
        sql = "insert into {name} values(%(c1)s, %(c2)s)".format(name=table)
        logger.debug(sql)
        cnx.cursor().execute(
            sql,
            {
                "c1": meta["idx"],
                "c2": "test string " + meta["idx"],
            },
        )
        meta["success"] = True
        logger.debug("Succeeded process #%s", meta["idx"])
    except Exception:
        logger.exception("failed to insert into a table [%s]", table)
        meta["success"] = False
    finally:
        cnx.close()
    return meta
@pytest.mark.skipif(
    not CONNECTION_PARAMETERS_ADMIN,
    reason="The user needs a privilege of create warehouse.",
)
def test_concurrent_insert(conn_cnx, db_parameters):
    """Concurrent insert tests. Inserts block on the one that's running.

    Spawns one worker connection per thread via ``_concurrent_insert`` and
    asserts that nearly all inserts succeed; the warehouse and table are
    created up front and dropped in the finally block.
    """
    number_of_threads = 22  # change this to increase the concurrency
    # At most one insert is expected to be blocked/canceled at a time.
    expected_success_runs = number_of_threads - 1
    cnx_array = []
    try:
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                """
create or replace warehouse {}
warehouse_type=standard
warehouse_size=small
""".format(
                    db_parameters["name_wh"]
                )
            )
            sql = """
create or replace table {name} (c1 integer, c2 string)
""".format(
                name=db_parameters["name"]
            )
            cnx.cursor().execute(sql)
            # Build one parameter dict per worker thread; each worker opens
            # its own connection from these values.
            for i in range(number_of_threads):
                cnx_array.append(
                    {
                        "host": db_parameters["host"],
                        "port": db_parameters["port"],
                        "user": db_parameters["user"],
                        "password": db_parameters["password"],
                        "account": db_parameters["account"],
                        "database": db_parameters["database"],
                        "schema": db_parameters["schema"],
                        "table": db_parameters["name"],
                        "idx": str(i),
                        "warehouse": db_parameters["name_wh"],
                    }
                )
            pool = ThreadPoolExecutor(number_of_threads)
            results = list(pool.map(_concurrent_insert, cnx_array))
            pool.shutdown()
            success = 0
            for record in results:
                success += 1 if record["success"] else 0
            # 21 threads or more
            assert success >= expected_success_runs, "Number of success run"
            c = cnx.cursor()
            sql = "select * from {name} order by 1".format(name=db_parameters["name"])
            c.execute(sql)
            for rec in c:
                logger.debug(rec)
            c.close()
    finally:
        # Always clean up the table and warehouse created above.
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                "drop table if exists {}".format(db_parameters["name"])
            )
            cnx.cursor().execute(
                "drop warehouse if exists {}".format(db_parameters["name_wh"])
            )
def _concurrent_insert_using_connection(meta):
connection = meta["connection"]
idx = meta["idx"]
name = meta["name"]
try:
connection.cursor().execute(
"INSERT INTO {name} VALUES(%s, %s)".format(name=name),
(idx, "test string{}".format(idx)),
)
except ProgrammingError as e:
if e.errno != 619: # SQL Execution Canceled
raise
@pytest.mark.skipif(
    not CONNECTION_PARAMETERS_ADMIN,
    reason="The user needs a privilege of create warehouse.",
)
def test_concurrent_insert_using_connection(conn_cnx, db_parameters):
    """Concurrent insert tests using the same connection.

    All worker threads share one connection; some inserts may be canceled
    (errno 619), so only lower/upper bounds on the row count are asserted.
    """
    try:
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                """
create or replace warehouse {}
warehouse_type=standard
warehouse_size=small
""".format(
                    db_parameters["name_wh"]
                )
            )
            cnx.cursor().execute(
                """
CREATE OR REPLACE TABLE {name} (c1 INTEGER, c2 STRING)
""".format(
                    name=db_parameters["name"]
                )
            )
            number_of_threads = 5
            metas = []
            # One work item per thread, all referencing the same connection.
            for i in range(number_of_threads):
                metas.append(
                    {
                        "connection": cnx,
                        "idx": i,
                        "name": db_parameters["name"],
                    }
                )
            pool = ThreadPoolExecutor(number_of_threads)
            pool.map(_concurrent_insert_using_connection, metas)
            pool.shutdown()
            cnt = 0
            for _ in cnx.cursor().execute(
                "SELECT * FROM {name} ORDER BY 1".format(name=db_parameters["name"])
            ):
                cnt += 1
            assert (
                cnt <= number_of_threads
            ), "Number of records should be less than the number of threads"
            assert cnt > 0, "Number of records should be one or more number of threads"
    finally:
        # Always clean up the table and warehouse created above.
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                "drop table if exists {}".format(db_parameters["name"])
            )
            cnx.cursor().execute(
                "drop warehouse if exists {}".format(db_parameters["name_wh"])
            )
|
wafhelpers/rtems_trace.py
|
fakecoinbase/ntpsecslashntpsec
| 201 |
143010
|
<reponame>fakecoinbase/ntpsecslashntpsec
from waflib.TaskGen import feature, after_method
@feature("rtems_trace")
@after_method('apply_link')
def rtems_trace(self):
if self.env.RTEMS_TEST_ENABLE:
self.link_task.env.LINK_CC = self.env.BIN_RTEMS_TLD \
+ self.env.RTEMS_TEST_FLAGS + ['--']
|
win-client/src/external/nanogui/icons/materialicons/util/gen_icon_symbols.py
|
keshon/samuno
| 157 |
143021
|
<gh_stars>100-1000
import os

# Emit a Go source file declaring one nanogui.Icon constant per Material
# icon codepoint, then run `go fmt` on the result if generation succeeded.
success = False
with open("../materialicons.go", "w") as output:
    output.write("""package materialicons
import "github.com/shibukawa/nanogui.go"
// generated by util/gen_icon_symbols.py
const (
""")
    for line in open("./codepoints").readlines():
        # Each codepoints line is "<icon_name> <hex_codepoint>\n"; the
        # trailing newline of the codepoint separates generated lines.
        name, codepoint = line.split(" ")
        output.write("Icon%s nanogui.Icon = 0x%s" % (name.title().replace("_", ""), codepoint))
    output.write(")")
    success = True
if success:
    os.system("go fmt ../materialicons.go")
|
test/tests/api-cit/v2_0/skupack_tests.py
|
arunrordell/RackHD
| 451 |
143031
|
<filename>test/tests/api-cit/v2_0/skupack_tests.py
"""
Copyright (c) 2016-2017 Dell Inc. or its subsidiaries. All Rights Reserved.
"""
import fit_path # NOQA: unused import
import fit_common
import time
import requests
import tarfile
import shutil
import os
import flogging
from config.api2_0_config import config
from on_http_api2_0 import ApiApi as Api
from on_http_api2_0.rest import ApiException
from json import loads, dumps, dump
from nosedep import depends
from nose.plugins.attrib import attr
logs = flogging.get_loggers()  # shared logger handle used by every test below
@attr(regression=False, smoke=True, skus_api2_tests=True)
class SkusTests(fit_common.unittest.TestCase):
    """API 2.0 integration tests for SKU CRUD and SKU-pack upload/removal.

    The tests are ordered with nosedep's @depends chain
    (post -> get -> patch -> delete, then the SKU-pack POST/PUT flows) and
    share state, such as the posted SKU id, through class-level attributes
    initialized in setUpClass.
    """
    @classmethod
    def setUpClass(cls):
        # Shared API client plus per-class state carried between tests.
        cls.__client = config.api_client
        cls.clear()
        cls.__nodes = []
        cls.__sku_id = ""
        cls.__workflows = {}
        cls.__tasks = {}
        cls.__config_json = {}
        cls.__rootDir = "/tmp/tarball/"
        cls.__skuPackTarball = cls.__rootDir + "mytest.tar.gz"
    @classmethod
    def tearDownClass(cls):
        # Remove any SKUs the tests created.
        cls.clear()
    def __get_data(self):
        # JSON-decode the body of the most recent API response.
        return loads(self.__client.last_response.data)
    def test_skus(self):
        # """Testing GET:api/2.0/skus to get list of skus"""
        Api().skus_get()
        rsp = self.__client.last_response
        self.assertEqual(200, rsp.status, msg=rsp.reason)
        # Parsing only -- verifies the body is valid JSON.
        loads(self.__client.last_response.data)
        # todo: should use api2_check-skus
    @depends(after='test_skus')
    def test_post_sku(self):
        # """Test POST:api/2.0/skus"""
        sku = {
            "name": "Quanta-D44",
            "rules": [
                {
                    "path": "dmi.Base Board Information.Manufacturer",
                    "contains": "Intel"
                },
                {
                    "path": "ohai.dmi.memory.total",
                    "equals": "32946864kB"
                }
            ],
            "discoveryGraphName": "Graph.InstallCoreOS",
            "discoveryGraphOptions": {
                "username": "testuser",
                "password": "<PASSWORD>",
                "hostname": "mycoreos"
            }
        }
        Api().skus_post(sku)
        data = self.__get_data()
        logs.debug(' Posted data %s', dumps(data, indent=4))
        logs.info(' Posted sku id %s', data['id'])
        # Every field we posted must be echoed back unchanged.
        for sku_key in sku.keys():
            self.assertEqual(sku[sku_key], data[sku_key], msg='Key "{}" not found'.format(sku_key))
        # set class level sku_id for patch test
        self.__class__.__sku_id = data['id']
        # POST the same SKU again and make sure that we get a 409
        try:
            Api().skus_post(sku)
        except ApiException as e:
            self.assertEqual(409, e.status, msg='Expected 409, received {}'.format(e.status))
    @depends(after='test_post_sku')
    def test_skus_id(self):
        # """Testing GET:api/2.0/skus/id to get specific catalog details"""
        Api().skus_get()
        found = False
        skus = loads(self.__client.last_response.data)
        # Fetch every listed SKU by id and confirm the one we posted exists.
        for n in skus:
            sku_id = n.get('id')
            logs.info_6(' Checking sku id %s', sku_id)
            self.assertIsNotNone(sku_id)
            Api().skus_id_get(identifier=sku_id)
            rsp = self.__client.last_response
            self.assertEqual(200, rsp.status, msg=rsp.reason)
            if sku_id == self.__class__.__sku_id:
                found = True
        self.assertTrue(found, "POSTed sku {} not found".format(sku_id))
    @depends(after='test_skus_id')
    def test_skus_patch(self):
        # """Test PATCH:api/2.0/skus/:identifier"""
        logs.info(' Patching SKU %s ', self.__class__.__sku_id)
        patch_data = {
            "name": "Quanta-T55"
        }
        Api().skus_patch(self.__class__.__sku_id, patch_data)
        result = self.__client.last_response
        data = loads(self.__client.last_response.data)
        self.assertEqual(200, result.status, msg=result.reason)
        self.assertEqual("Quanta-T55", data['name'])
        # Patching a nonexistent SKU must 404.
        try:
            Api().skus_patch('does_not_exist', {})
        except ApiException as e:
            self.assertEqual(404, e.status, msg='Expected 404, received {}'.format(e.status))
    @depends(after='test_skus_patch')
    def test_delete_skus(self):
        # """Test DELETE:api/2.0/skus/:identifier"""
        logs.info(' Deleting SKU %s', self.__class__.__sku_id)
        Api().skus_id_delete(identifier=self.__class__.__sku_id)
        result = self.__client.last_response
        self.assertEqual(204, result.status, msg=result.reason)
        # Check if sku was deleted
        try:
            Api().skus_id_get(identifier=self.__class__.__sku_id)
        except ApiException as e:
            self.assertEqual(404, e.status, msg='Expected 404, received {}'.format(e.status))
        # Check delete of invalid id
        try:
            Api().skus_id_delete(identifier='does_not_exist')
        except ApiException as e:
            self.assertEqual(404, e.status, msg='Expected 404, received {}'.format(e.status))
    def dont_run_get_sku_nodes(self):
        # """Test GET /api/2.0/skus/:identifier/nodes"""
        # NOTE(review): "dont_run_" prefix keeps this out of the test run.
        sku = {
            "name": "mytestsku",
            "rules": [
                {
                    "path": "dmi.Base Board Information.Manufacturer",
                    "contains": " "
                }
            ]
        }
        Api().nodes_get_all()
        self.__class__.__nodes = loads(self.__client.last_response.data)
        for node in self.__class__.__nodes:
            if node.get('type') == 'compute':
                # update the sku rule above (rules[0].name.contains) with a value from the cataloged node
                node_id = node.get('id')
                logs.info("Nodeid %s", node_id)
                Api().nodes_get_catalog_source_by_id(identifier=node_id, source='dmi')
                node_catalog_data = loads(self.__client.last_response.data)
                # logs.info('node_manufacturer is : %s ', node_catalog_data)
                if len(node_catalog_data) > 0:
                    logs.info('node_manufacturer is: %s',
                              node_catalog_data.get('data').get("Base Board Information").get("Manufacturer"))
                    node_manufacturer = node_catalog_data \
                        .get('data').get("Base Board Information").get("Manufacturer").split(" ")[0]
                    sku['rules'][0]['contains'] = node_manufacturer
                    # POST the new sku
                    logs.info("posting SKU : %s", sku)
                    Api().skus_post(sku)
                    result = self.__client.last_response
                    data = loads(self.__client.last_response.data)
                    sku_id = data['id']
                    logs.info("ID of the posted sku is: %s", sku_id)
                    self.assertEqual(201, result.status, msg=result.reason)
                    logs.info("node_id %s", node_id)
                    # Give enough time to wait the sku discovery finishes
                    time.sleep(3)
                    retries = 10
                    while retries > 0:
                        self.assertEqual(201, result.status, msg=result.reason)
                        logs.info("node_id %s", node_id)
                        # Validate that the sku element in the node has been updated with the right sku ID
                        Api().nodes_get_by_id(identifier=node_id)
                        updated_node = loads(self.__client.last_response.data)
                        if updated_node['sku'] is not None:
                            logs.info("updated_node is: %s", updated_node)
                            # The node's sku is a URL; element 4 is the sku id.
                            arr = updated_node['sku'].split("/")
                            if sku_id == arr[4]:
                                logs.info("updated_node is : %s", updated_node)
                                break
                        retries = retries - 1
                        if retries == 0:
                            self.fail("The node {} never be assigned with the new sku {}".format(node_id, sku_id))
                            # raise Error("The node {0} never be assigned with the new sku {1}".format(node_id, sku_id))
                        else:
                            logs.info("Wait more time to let new sku take effect, remaining %s retries", retries)
                            time.sleep(1)
                    # validate the /api/2.0/skus/:id/nodes works
                    Api().skus_id_get_nodes(sku_id)
                    result = self.__client.last_response
                    data = loads(self.__client.last_response.data)
                    self.assertEqual(200, result.status, msg=result.reason)
                    flag = False
                    for item in data:
                        if item["id"] == node_id:
                            flag = True
                            break
                    self.assertTrue(flag, msg='Node id {} not found'.format(node_id))
                    # delete the sku that where created
                    logs.info(" Deleting the added sku of %s", sku_id)
                    Api().skus_id_delete(identifier=sku_id)
                    result = self.__client.last_response
                    self.assertEqual(204, result.status, msg=result.reason)
    # @depends(after='test_get_sku_nodes')
    @depends(after='test_delete_skus')
    def test_post_skupack(self):
        # """Test POST:api/2.0/skus/pack"""
        Api().nodes_get_all()
        self.__class__.__nodes = loads(self.__client.last_response.data)
        logs.debug_6(" class nodes %s", self.__class__.__nodes)
        # Find the first compute node to base the pack's sku rule on.
        for node in self.__class__.__nodes:
            if node.get('type') == 'compute':
                break
        if node:
            # update the sku rule above (rules[0].name.contains) with a value from the cataloged node
            node_id = node.get('id')
            logs.info(" Node_id %s ", node_id)
            Api().nodes_get_catalog_source_by_id(identifier=node_id, source='dmi')
            node_catalog_data = loads(self.__client.last_response.data)
            if len(node_catalog_data) > 0:
                node_manufacturer = node_catalog_data \
                    .get('data').get("System Information").get("Manufacturer").split(" ")[0]
                logs.info(" node %s Man %s", node_id, node_manufacturer)
                # Post the sku pack
                self.generateTarball(node_manufacturer)
                self.__file = {'file': open(self.__skuPackTarball, 'rb')}
                URL = config.host + config.api_root + '/skus/pack'
                logs.info("URL %s", URL)
                requests.adapters.DEFAULT_RETRIES = 3
                # Retry the upload a few times to tolerate transient errors.
                for n in range(0, 5):
                    res = None
                    try:
                        logs.info("Number of attempts to POST the skupack: %s", n + 1)
                        res = requests.post(URL, files=self.__file)
                        break
                    except requests.ConnectionError as err:
                        logs.info("Request Error: %s", str(err))
                self.assertIsNotNone(res, msg='Connection could not be established')
                self.assertEqual(201, res.status_code, msg=res.reason)
                self.__packFolderId = res.text.split('"')[3]
                # Validate that the pack content of workflows has been posted
                win_str = '{}::{}'.format(self.__class__.__workflows.get("injectableName"), self.__packFolderId)
                logs.info('Workflow injectable name: %s', win_str)
                Api().workflows_get_graphs_by_name(win_str)
                result = self.__client.last_response
                loads(self.__client.last_response.data)
                self.assertEqual(200, result.status, msg=res.reason)
                # Validate that the pack content of tasks has been posted
                tin_str = '{}::{}'.format(self.__class__.__tasks.get("injectableName"), self.__packFolderId)
                logs.info(tin_str)
                Api().workflows_get_tasks_by_name(tin_str)
                result = self.__client.last_response
                loads(self.__client.last_response.data)
                self.assertEqual(200, result.status, msg=res.reason)
                # Check for skupack templates
                sku_id = res.json()['id']
                Api().templates_meta_get_by_name('template.json', scope=sku_id)
                self.assertEqual(200, self.__client.last_response.status)
                self.assertEqual(1, len(loads(self.__client.last_response.data)))
                # Check for skupack profiles
                Api().profiles_get_metadata_by_name('useless.json', scope=sku_id)
                self.assertEqual(200, self.__client.last_response.status)
                self.assertEqual(1, len(loads(self.__client.last_response.data)))
                # """Test DELETE:api/2.0/skus/:identifier/pack"""
                Api().skus_id_get(identifier=self.__packFolderId)
                result = self.__client.last_response
                self.assertEqual(200, result.status,
                                 msg="skus_id_get: expected 200, received {0}".format(result.status))
                logs.info("SkuPack FolderId to delete %s", self.__packFolderId)
                Api().skus_id_delete_pack(identifier=self.__packFolderId)
                result = self.__client.last_response
                self.assertEqual(204, result.status,
                                 msg="sku pack folder delete failed, expected {}, received {}, reason {}"
                                 .format(200, result.status, result.reason))
                # check to see if skuPack related key is None after the delete pack
                Api().skus_get()
                skus = loads(self.__client.last_response.data)
                for sku in skus:
                    self.assertEqual(None, sku.get('httpProfileRoot'))
                Api().skus_id_delete(self.__packFolderId)
                result = self.__client.last_response
                self.assertEqual(204, result.status,
                                 msg="sku id delete failed, expected {}, received {}, reason {}"
                                 .format(204, result.status, result.reason))
                # check to see if sku contents are cleaned up
                try:
                    Api().skus_id_get(identifier=self.__packFolderId)
                    self.fail(msg="packFolderId {0} was not expected".format(self.__packFolderId))
                except ApiException as e:
                    result = self.__client.last_response
                    self.assertEqual(404, e.status,
                                     msg="status = {1}, packFolderId {0} was not expected"
                                     .format(self.__packFolderId, e.status))
    @depends(after='test_post_skupack')
    def test_put_skupack(self):
        # """Test PUT:api/2.0/skus/pack"""
        # Post the sku pack
        self.generateTarball("Non Quanta")
        self.__file = {'file': open(self.__skuPackTarball, 'rb')}
        logs.info("****** The POST SKU pack is : %s ", self.__skuPackTarball)
        URL = config.host + config.api_root + '/skus/pack'
        requests.adapters.DEFAULT_RETRIES = 3
        res = None
        try:
            logs.info("Posting SKU PACK")
            res = requests.post(URL, files=self.__file)
        except requests.ConnectionError as e:
            logs.info("POST Failed. status: %s, Message: %s", e.status, e.message)
            return
        # sku pack posted
        logs.info("****** Posted sku pack: %s", res.text)
        self.assertIsNotNone(res, msg='POST: Connection could not be established')
        self.assertEqual(201, res.status_code, msg="expected 201, received {0}".format(res.status_code))
        self.__packFolderId = res.text.split('"')[3]
        logs.info(" FolderId: %s ", self.__packFolderId)
        # PUT the sku pack
        URL = config.host + '/api/2.0/skus/' + self.__packFolderId + '/pack'
        logs.info("****** The PUT URL is %s: ", URL)
        logs.info(" The SKU pack is : %s ", self.__skuPackTarball)
        requests.adapters.DEFAULT_RETRIES = 3
        res = None
        try:
            with open(self.__skuPackTarball) as fh:
                mydata = fh.read()
            res = requests.put(URL, data=mydata, headers={'Content-Type': 'application/x-www-form-urlencoded'})
        except requests.ConnectionError as e:
            logs.info("PUT Failed. status: %s, Message: %s", e.status, e.message)
            return
        self.assertIsNotNone(res, msg='PUT: Connection could not be established')
        self.assertEqual(201, res.status_code, msg="expected 201, received {0}".format(res.status_code))
        # sku ids should match from POST and PUT
        putId = res.text.split('"')[3]
        self.assertEqual(self.__packFolderId, putId, "sku pack ids don't match after post followed by put")
        # Clean up
        logs.info("SkuPack FolderId to delete %s", self.__packFolderId)
        Api().skus_id_delete_pack(identifier=self.__packFolderId)
        result = self.__client.last_response
        self.assertEqual(204, result.status,
                         msg="sku pack folder delete failed, expected {0}, received {1}, reason {2}"
                         .format(204, result.status, result.reason))
        logs.info("skupack id delete: %s", self.__packFolderId)
        Api().skus_id_delete(self.__packFolderId)
        result = self.__client.last_response
        self.assertEqual(204, result.status,
                         msg="sku pack delete failed, expected {0}, received {1}, reason {2}"
                         .format(204, result.status, result.reason))
    def generateTarball(self, ruleUpdate=None):
        """Build the sku-pack tarball under /tmp/tarball/.

        Writes config.json, a task, a workflow, a template and a profile,
        then tars them up. *ruleUpdate*, when given, replaces the
        "contains" value of the first sku rule.
        """
        current_dir = os.getcwd()
        # Start from a clean staging directory every time.
        if os.path.isdir(self.__rootDir):
            shutil.rmtree(self.__rootDir)
        os.mkdir(self.__rootDir)
        tarballDirs = ["profiles", "static", "tasks", "templates", "workflows"]
        for dir in tarballDirs:
            os.mkdir(self.__rootDir + dir)
        self.__class__.__config_json = {
            "name": "Quanta X41",
            "rules": [
                {
                    "path": "dmi.Base Board Information.Manufacturer",
                    "contains": "Quanta"
                }
            ],
            "skuConfig": {
                "value1": {
                    "value": "value"
                }
            },
            "workflowRoot": "workflows",
            "taskRoot": "tasks",
            "httpProfileRoot": "profiles",
            "httpTemplateRoot": "templates",
            "httpStaticRoot": "static"
        }
        if ruleUpdate is not None:
            self.__class__.__config_json['rules'][0]['contains'] = ruleUpdate
        with open(self.__rootDir + 'config.json', 'w') as f:
            dump(self.__class__.__config_json, f)
        f.close()
        self.__class__.__tasks = {
            "friendlyName": "Flash Quanta BMC",
            "injectableName": "Task.Linux.Flash.unique.Bmc",
            "implementsTask": "Task.Base.Linux.Commands",
            "options": {
                "file": None,
                "downloadDir": "/opt/downloads",
                "commands": [
                    "sudo /opt/socflash/socflash_x64 -b /opt/uploads/bmc-backup.bin",
                    "sudo curl -T /opt/uploads/bmc-backup.bin {{ api.files }}/{{ task.nodeId }}-bmc-backup.bin",
                    "sudo /opt/socflash/socflash_x64 -s option=x flashtype=2 if={{ options.downloadDir }}/{{ options.file }}"
                ]
            },
            "properties": {
                "flash": {
                    "type": "bmc",
                    "vendor": {
                        "quanta": {}
                    }
                }
            }
        }
        with open(self.__rootDir + '/tasks/tasks.json', 'w') as f:
            dump(self.__class__.__tasks, f)
        f.close()
        self.__class__.__workflows = {
            "friendlyName": "noop-sku-graph",
            "injectableName": "Graph.noop-example",
            "tasks": [
                {
                    "label": "noop-1",
                    "taskName": "Task.noop"
                },
                {
                    "label": "noop-2",
                    "taskName": "Task.noop",
                    "waitOn": {
                        "noop-1": "finished"
                    }
                }
            ]
        }
        with open(self.__rootDir + '/workflows/workflows.json', 'w') as f:
            dump(self.__class__.__workflows, f)
        f.close()
        self.__template = {
            "friendlyName": "Flash Quanta BMC",
            "injectableName": "Task.Linux.Flash.unique.Bmc",
            "implementsTask": "Task.Base.Linux.Commands",
            "options": {
                "file": None,
                "downloadDir": "/opt/downloads",
                "commands": [
                    "sudo /opt/socflash/socflash_x64 -b /opt/uploads/bmc-backup.bin",
                    "sudo curl -T /opt/uploads/bmc-backup.bin {{ api.files }}/{{ task.nodeId }}-bmc-backup.bin",
                    "sudo /opt/socflash/socflash_x64 -s option=x flashtype=2 if={{ options.downloadDir }}/{{ options.file }}"
                ]
            },
            "properties": {
                "flash": {
                    "type": "bmc",
                    "vendor": {
                        "quanta": {}
                    }
                }
            }
        }
        with open(self.__rootDir + 'templates/template.json', 'w') as f:
            dump(self.__template, f)
        f.close()
        self.__profile = {'useless': 'a useless profile'}
        with open(self.__rootDir + 'profiles/useless.json', 'w') as f:
            dump(self.__profile, f)
        f.close()
        os.chdir(self.__rootDir)
        with tarfile.open(name=self.__skuPackTarball, mode="w:gz") as f:
            for name in ["config.json", "profiles", "static", "tasks", "templates", "workflows"]:
                f.add(name)
        # restore the current directory to the run_tests.py dir
        # so it doesn't affect other tests
        os.chdir(current_dir)
    @classmethod
    def clear(self):
        """ Clear the test SKU IDs from the testbed """
        # NOTE(review): this @classmethod's first parameter is named "self"
        # but receives the class object.
        Api().skus_get()
        self.__client.last_response
        data = loads(self.__client.last_response.data)
        for item in data:
            if item['name'] in ['Quanta-D44', 'Quanta-T55', 'mytestsku', 'Quanta X41']:
                logs.info("Cleaning skus")
                logs.info(item.get("id"))
                Api().skus_id_delete(item.get("id"))
|
examples/simple/server.py
|
khadas/android_external_python_pyopenssl
| 5,079 |
143039
|
<reponame>khadas/android_external_python_pyopenssl
# -*- coding: latin-1 -*-
#
# Copyright (C) AB Strakt
# Copyright (C) <NAME>
# See LICENSE for details.
"""
Simple echo server, using nonblocking I/O
"""
from __future__ import print_function
import os
import select
import socket
import sys
from OpenSSL import SSL, crypto
def verify_cb(conn, cert, errnum, depth, ok):
    """Certificate-verification callback: log the subject CN, keep the verdict."""
    subject = crypto.X509Name(cert.get_subject())
    print('Got certificate: ' + subject.commonName)
    return ok
# Require the listening port as the sole CLI argument.
if len(sys.argv) < 2:
    print('Usage: python server.py PORT')
    sys.exit(1)
# Key/cert material is looked up next to this script.
dir = os.path.dirname(sys.argv[0])
if dir == '':
    dir = os.curdir
# Initialize context
ctx = SSL.Context(SSL.SSLv23_METHOD)
# Refuse the broken SSLv2/SSLv3 protocol versions.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.set_options(SSL.OP_NO_SSLv3)
ctx.set_verify(
    SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb
)  # Demand a certificate
ctx.use_privatekey_file(os.path.join(dir, 'server.pkey'))
ctx.use_certificate_file(os.path.join(dir, 'server.cert'))
ctx.load_verify_locations(os.path.join(dir, 'CA.cert'))
# Set up server
server = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
server.bind(('', int(sys.argv[1])))
server.listen(3)
# Non-blocking mode: readiness is driven by select() in the main loop.
server.setblocking(0)
clients = {}  # connection -> peer address
writers = {}  # connection -> pending outbound data
def dropClient(cli, errors=None):
    """Forget *cli* and close it; shut down the TLS layer only on a clean exit."""
    addr = clients.pop(cli)
    if errors:
        print('Client %s left unexpectedly:' % (addr,))
        print(' ', errors)
    else:
        print('Client %s left politely' % (addr,))
    writers.pop(cli, None)
    if not errors:
        cli.shutdown()
    cli.close()
# Event loop: multiplex all connections with select() until interrupted.
while 1:
    try:
        r, w, _ = select.select(
            [server] + list(clients.keys()), list(writers.keys()), []
        )
    except Exception:
        # e.g. KeyboardInterrupt or a closed descriptor -- leave the loop.
        break
    for cli in r:
        if cli == server:
            # Readable listener means a new client is waiting to be accepted.
            cli, addr = server.accept()
            print('Connection from %s' % (addr,))
            clients[cli] = addr
        else:
            try:
                ret = cli.recv(1024).decode('utf-8')
            except (SSL.WantReadError,
                    SSL.WantWriteError,
                    SSL.WantX509LookupError):
                # TLS handshake/renegotiation in progress -- retry next pass.
                pass
            except SSL.ZeroReturnError:
                # Clean TLS shutdown by the peer.
                dropClient(cli)
            except SSL.Error as errors:
                dropClient(cli, errors)
            else:
                if cli not in writers:
                    writers[cli] = ''
                # Echo server: queue received data for sending back.
                # NOTE(review): ret is a decoded str; Connection.send() on
                # Python 3 expects bytes -- verify this example still works.
                writers[cli] = writers[cli] + ret
    for cli in w:
        try:
            ret = cli.send(writers[cli])
        except (SSL.WantReadError,
                SSL.WantWriteError,
                SSL.WantX509LookupError):
            pass
        except SSL.ZeroReturnError:
            dropClient(cli)
        except SSL.Error as errors:
            dropClient(cli, errors)
        else:
            # send() may write only part of the buffer; keep the remainder.
            writers[cli] = writers[cli][ret:]
            if writers[cli] == '':
                del writers[cli]
# Tear everything down on exit.
for cli in clients.keys():
    cli.close()
server.close()
|
tests/test_estimators/test_randomregressor.py
|
lahdjirayhan/scikit-lego
| 784 |
143042
|
import numpy as np
import pytest
from sklego.common import flatten
from sklego.dummy import RandomRegressor
from tests.conftest import nonmeta_checks, regressor_checks, general_checks, select_tests
@pytest.mark.parametrize(
    "test_fn",
    select_tests(
        flatten([general_checks, nonmeta_checks, regressor_checks]),
        exclude=[
            "check_sample_weights_invariance",
            "check_methods_subset_invariance",
            "check_regressors_train",
            "check_sample_weights_list",
            "check_sample_weights_pandas_series"
        ]
    )
)
def test_estimator_checks(test_fn):
    """Run the selected sklearn estimator checks against both strategies.

    Excluded checks:
    - check_methods_subset_invariance: the added noise breaks subset invariance.
    - check_regressors_train: score is not always greater than 0.5 due to randomness.
    """
    for strategy in ("normal", "uniform"):
        estimator = RandomRegressor(strategy=strategy)
        test_fn("{}_{}".format(RandomRegressor.__name__, strategy), estimator)
def test_values_uniform(random_xy_dataset_regr):
    """Uniform strategy: predictions stay inside [min(y), max(y)] and the
    fitted bounds match the sample extremes."""
    X, y = random_xy_dataset_regr
    model = RandomRegressor(strategy="uniform").fit(X, y)
    preds = model.predict(X)
    assert ((preds >= y.min()) & (preds <= y.max())).all()
    assert model.min_ == pytest.approx(y.min(), abs=0.0001)
    assert model.max_ == pytest.approx(y.max(), abs=0.0001)
def test_values_normal(random_xy_dataset_regr):
    """Normal strategy: fitted mu_/sigma_ match the sample mean and std of y."""
    X, y = random_xy_dataset_regr
    fitted = RandomRegressor(strategy="normal").fit(X, y)
    assert fitted.mu_ == pytest.approx(np.mean(y), abs=0.001)
    assert fitted.sigma_ == pytest.approx(np.std(y), abs=0.001)
def test_bad_values():
    """An unknown strategy string must raise ValueError at fit time."""
    np.random.seed(42)
    features = np.random.normal(0, 1, (10, 2))
    targets = np.random.normal(0, 1, (10, 1))
    with pytest.raises(ValueError):
        RandomRegressor(strategy="foobar").fit(features, targets)
|
django_quicky/context_processors.py
|
sametmax/django-quicky
| 149 |
143043
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from django.views.debug import get_safe_settings
class SafeSettings(object):
    """Attribute-style, read-only access to Django's safe settings.

    Attribute names are upper-cased before lookup, so ``obj.debug`` maps
    to the ``DEBUG`` entry of :func:`get_safe_settings`.
    """

    def __init__(self):
        # Snapshot of settings with sensitive values already masked.
        self._settings = get_safe_settings()

    def __getattr__(self, name):
        key = name.upper()
        try:
            return self._settings[key]
        except KeyError:
            raise AttributeError
# Module-level singleton: one settings snapshot shared by all requests.
settings_obj = SafeSettings()


def settings(request):
    """Context processor exposing safe settings under the ``settings`` key."""
    return {'settings': settings_obj}
|
HunterAdminApi/api/authentication/auth_module_factory.py
|
tt9133github/hunter
| 322 |
143069
|
<filename>HunterAdminApi/api/authentication/auth_module_factory.py
#!/ usr/bin/env
# coding=utf-8
#
# Copyright 2019 ztosec & https://sec.zto.com/
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
author: b5mali4
"""
import threading
from model.default_value import Role
from api.authentication.default_auth_module import DefaultAuthModule
from api.authentication.ldap_auth_module import LdapAuthModule
# single_auto_module_lock = threading.Lock()
# Process-wide singleton auth module; created lazily by get_auth_module().
auto_module_instance = None
# __all__ = ["check_authentication"]
def get_auth_module():
    """Return the process-wide auth module, creating it lazily.

    Intentionally lock-free: construction is idempotent, so a rare
    double construction under contention is harmless.

    :return: the shared auth module instance.
    """
    global auto_module_instance
    if not auto_module_instance:
        auto_module_instance = DefaultAuthModule()
    return auto_module_instance
def check_authentication(role=Role.USER):
    """Check the caller's permission via the shared auth module (SSO by default).

    :param role: minimum role required for access.
    :return: the auth module's check result.
    """
    module = get_auth_module()
    return module.check_authentication(role)
|
src/connectors/cisco_umbrella.py
|
sfc-gh-kmaurya/SnowAlert
| 144 |
143082
|
<reponame>sfc-gh-kmaurya/SnowAlert
"""Cisco Umbrella
Collect Cisco Umbrella information using a Client ID and Secret
"""
from datetime import datetime
from typing import Optional

import requests

from runners.helpers import db, log
from runners.helpers.dbconfig import ROLE as SA_ROLE

from .utils import yaml_dump
# Number of roaming-computer records requested per API page.
PAGE_SIZE = 500
# Options the SnowAlert installer prompts for when configuring this connector.
CONNECTION_OPTIONS = [
    {
        'name': 'api_key',
        'title': "Cisco Umbrella API Key",
        'prompt': "Your Cisco Umbrella API Key",
        'type': 'str',
        'required': True,
        'secret': True,
    },
    {
        'name': 'api_secret',
        'title': "Cisco Umbrella API Secret",
        'prompt': "Your Cisco Umbrella API Secret",
        'type': 'str',
        'secret': True,
        'required': True,
    },
    {
        'name': 'organization_id',
        'title': "Cisco Umbrella Organization Id",
        'prompt': "Your Cisco Umbrella Organization Id",
        'type': 'int',
        'required': True,
    },
]
# Schema of the landing table; order must match the tuples built in ingest().
LANDING_TABLE_COLUMNS = [
    ('INSERT_ID', 'NUMBER IDENTITY START 1 INCREMENT 1'),
    ('SNAPSHOT_AT', 'TIMESTAMP_LTZ(9)'),
    ('RAW', 'VARIANT'),
    ('DEVICE_ID', 'VARCHAR(256)'),
    ('OS_VERSION_NAME', 'VARCHAR(256)'),
    ('LAST_SYNC_STATUS', 'VARCHAR(256)'),
    ('TYPE', 'VARCHAR(256)'),
    ('VERSION', 'VARCHAR(256)'),
    ('LAST_SYNC', 'TIMESTAMP_LTZ(9)'),
    ('OS_VERSION', 'VARCHAR(256)'),
    ('NAME', 'VARCHAR(256)'),
    ('STATUS', 'VARCHAR(256)'),
    ('ORIGIN_ID', 'NUMBER(38,0)'),
    ('APPLIED_BUNDLE', 'NUMBER(38,0)'),
    ('HAS_IP_BLOCKING', 'BOOLEAN'),
]
def get_data(organization_id: int, key: str, secret: str, params: Optional[dict] = None) -> dict:
    """Fetch one page of roaming-computer records from the Umbrella API.

    :param organization_id: Cisco Umbrella organization to query.
    :param key: API key used for HTTP basic auth.
    :param secret: API secret used for HTTP basic auth.
    :param params: optional query parameters (e.g. paging).
    :return: parsed JSON response body.
    :raises requests.HTTPError: on a non-2xx response.
    """
    # Fix a mutable-default-argument smell: a shared dict default would leak
    # state across calls if any caller (or future code) ever mutated it.
    if params is None:
        params = {}
    url = f"https://management.api.umbrella.com/v1/organizations/{organization_id}/roamingcomputers"
    headers: dict = {"Content-Type": "application/json", "Accept": "application/json"}
    try:
        req = requests.get(
            url,
            params=params,
            headers=headers,
            auth=requests.auth.HTTPBasicAuth(key, secret),
        )
        req.raise_for_status()
    except requests.HTTPError as http_err:
        log.error(f"Error GET: url={url}")
        log.error(f"HTTP error occurred: {http_err}")
        raise
    try:
        log.debug(req.status_code)
        json = req.json()
    except Exception as json_error:
        log.error(f"JSON error occurred: {json_error}")
        log.debug(f"requests response {req}")
        raise
    return json
def connect(connection_name, options):
    """Create the landing table for a Cisco Umbrella devices connection.

    The connection options are serialized into the table comment so the
    ingest runner can recover them later.
    """
    table_name = f'cisco_umbrella_devices_{connection_name}_connection'
    landing_table = f'data.{table_name}'
    db.create_table(
        name=landing_table,
        cols=LANDING_TABLE_COLUMNS,
        comment=yaml_dump(module='cisco_umbrella', **options),
    )
    db.execute(f'GRANT INSERT, SELECT ON data.{table_name} TO ROLE {SA_ROLE}')
    return {
        'newStage': 'finalized',
        'newMessage': "Cisco Umbrella ingestion table created!",
    }
def ingest(table_name, options):
    """Page through all Umbrella roaming computers and land them in Snowflake.

    Yields the number of rows inserted per page so the runner can track
    progress; stops when the API returns an empty page.
    """
    landing_table = f'data.{table_name}'
    # One snapshot timestamp for the whole run, shared by every page.
    timestamp = datetime.utcnow()
    organization_id = options['organization_id']
    api_secret = options['api_secret']
    api_key = options['api_key']
    params: dict = {"limit": PAGE_SIZE, "page": 1}  # API starts at 1
    while 1:
        devices: dict = get_data(organization_id, api_key, api_secret, params)
        # Advance the page counter for the next iteration; the current page
        # was fetched with the previous value.
        params["page"] += 1
        if len(devices) == 0:
            break
        db.insert(
            landing_table,
            values=[
                (
                    timestamp,
                    device,
                    device.get('deviceId'),
                    device.get('osVersionName', None),
                    device.get('lastSyncStatus', None),
                    device.get('type', None),
                    device.get('version', None),
                    device.get('lastSync', None),
                    device.get('osVersion', None),
                    device.get('name', None),
                    device.get('status', None),
                    device.get('originId', None),
                    device.get('appliedBundle', None),
                    device.get('hasIpBlocking', None),
                )
                for device in devices
            ],
            select=db.derive_insert_select(LANDING_TABLE_COLUMNS),
            columns=db.derive_insert_columns(LANDING_TABLE_COLUMNS),
        )
        log.info(f'Inserted {len(devices)} rows.')
        yield len(devices)
|
Chapter9/listing9_3.py
|
hohsieh/osgeopy-code
| 160 |
143083
|
<reponame>hohsieh/osgeopy-code
# Script to extract a spatial subset from a raster.
#
# Reads a 3-band Landsat composite, computes the pixel window covering a
# UTM bounding box around Vashon Island, and writes that window to a new
# GeoTIFF with corrected georeferencing.
import os
from osgeo import gdal
# Coordinates for the bounding box to extract.
# Upper-left and lower-right corners in the raster's projected CRS (meters).
vashon_ulx, vashon_uly = 532000, 5262600
vashon_lrx, vashon_lry = 548500, 5241500
# Don't forget to change the directory.
os.chdir(r'D:\osgeopy-data\Landsat\Washington')
in_ds = gdal.Open('nat_color.tif')
# Create an inverse geotransform for the raster. This converts real-world
# coordinates to pixel offsets.
in_gt = in_ds.GetGeoTransform()
inv_gt = gdal.InvGeoTransform(in_gt)
# GDAL 1.x returns a (success, geotransform) pair; GDAL 2+ returns the
# geotransform directly, or None on failure.
if gdal.VersionInfo()[0] == '1':
    if inv_gt[0] == 1:
        inv_gt = inv_gt[1]
    else:
        raise RuntimeError('Inverse geotransform failed')
elif inv_gt is None:
    raise RuntimeError('Inverse geotransform failed')
# Get the offsets that correspond to the bounding box corner coordinates.
offsets_ul = gdal.ApplyGeoTransform(
    inv_gt, vashon_ulx, vashon_uly)
offsets_lr = gdal.ApplyGeoTransform(
    inv_gt, vashon_lrx, vashon_lry)
# The offsets are returned as floating point, but we need integers.
off_ulx, off_uly = map(int, offsets_ul)
off_lrx, off_lry = map(int, offsets_lr)
# Compute the numbers of rows and columns to extract, based on the offsets.
rows = off_lry - off_uly
columns = off_lrx - off_ulx
# Create an output raster with the correct number of rows and columns.
gtiff_driver = gdal.GetDriverByName('GTiff')
out_ds = gtiff_driver.Create('vashon.tif', columns, rows, 3)
out_ds.SetProjection(in_ds.GetProjection())
# Convert the offsets to real-world coordinates for the georeferencing info.
# We can't use the coordinates above because they don't correspond to the
# pixel edges.
subset_ulx, subset_uly = gdal.ApplyGeoTransform(
    in_gt, off_ulx, off_uly)
out_gt = list(in_gt)
out_gt[0] = subset_ulx
out_gt[3] = subset_uly
out_ds.SetGeoTransform(out_gt)
# Loop through the 3 bands.
for i in range(1, 4):
    in_band = in_ds.GetRasterBand(i)
    out_band = out_ds.GetRasterBand(i)
    # Read the data from the input raster starting at the computed offsets.
    data = in_band.ReadAsArray(
        off_ulx, off_uly, columns, rows)
    # Write the data to the output, but no offsets are needed because we're
    # filling the entire image.
    out_band.WriteArray(data)
# Dereferencing the dataset flushes data to disk and closes the file.
del out_ds
|
preprocess/crop_image_sequences.py
|
ashish-roopan/fsgan
| 599 |
143088
|
<reponame>ashish-roopan/fsgan<gh_stars>100-1000
import os
import pickle
import numpy as np
import cv2
from fsgan.utils.bbox_utils import scale_bbox, crop_img
from fsgan.utils.video_utils import Sequence
def main(input_path, output_dir=None, cache_path=None, seq_postfix='_dsfd_seq.pkl', out_postfix='.jpg', resolution=256,
         crop_scale=1.2):
    """Crop every cached face sequence out of a single image.

    For each sequence in the cache file, the first detection's bounding box is
    scaled by `crop_scale`, cropped out of the image, resized to
    `resolution` x `resolution`, and written to `output_dir` as
    '<image name>_seqNN<out_postfix>'.
    """
    if cache_path is None:
        cache_path = os.path.splitext(input_path)[0] + seq_postfix
    if output_dir is None:
        output_dir = os.path.splitext(input_path)[0]
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

    # Verify all required paths before doing any work.
    if not os.path.isfile(input_path):
        raise RuntimeError('Input video does not exist: ' + input_path)
    if not os.path.isfile(cache_path):
        raise RuntimeError('Cache file does not exist: ' + cache_path)
    if not os.path.isdir(output_dir):
        raise RuntimeError('Output directory does not exist: ' + output_dir)

    print('=> Cropping image sequences from image: "%s"...' % os.path.basename(input_path))

    # Load the detected sequences from the cache file.
    with open(cache_path, "rb") as fp:
        seq_list = pickle.load(fp)

    # Read the source image.
    img = cv2.imread(input_path)
    if img is None:
        raise RuntimeError('Failed to read image: ' + input_path)

    for seq in seq_list:
        det = seq[0]

        # Convert (x1, y1, x2, y2) to (x, y, w, h), scale, crop, and resize.
        bbox = np.concatenate((det[:2], det[2:] - det[:2]))
        bbox = scale_bbox(bbox, crop_scale)
        img_cropped = crop_img(img, bbox)
        img_cropped = cv2.resize(img_cropped, (resolution, resolution), interpolation=cv2.INTER_CUBIC)

        # Write the cropped image to the output directory.
        out_img_name = os.path.splitext(os.path.basename(input_path))[0] + '_seq%02d%s' % (seq.id, out_postfix)
        out_img_path = os.path.join(output_dir, out_img_name)
        cv2.imwrite(out_img_path, img_cropped)
if __name__ == "__main__":
    # Parse program arguments
    import argparse
    parser = argparse.ArgumentParser('crop_image_sequences')
    parser.add_argument('input', metavar='VIDEO',
                        help='path to input video')
    parser.add_argument('-o', '--output', metavar='DIR',
                        help='output directory')
    parser.add_argument('-c', '--cache', metavar='PATH',
                        help='path to sequence cache file')
    parser.add_argument('-sp', '--seq_postfix', default='_dsfd_seq.pkl', metavar='POSTFIX',
                        help='input sequence file postfix')
    # BUG FIX: help text was copy-pasted from --seq_postfix; this flag sets
    # the postfix of the written image files, not the input sequence file.
    parser.add_argument('-op', '--out_postfix', default='.jpg', metavar='POSTFIX',
                        help='output image file postfix')
    parser.add_argument('-r', '--resolution', default=256, type=int, metavar='N',
                        help='output video resolution (default: 256)')
    parser.add_argument('-cs', '--crop_scale', default=1.2, type=float, metavar='F',
                        help='crop scale relative to bounding box (default: 1.2)')
    args = parser.parse_args()
    main(args.input, args.output, args.cache, args.seq_postfix, args.out_postfix, args.resolution, args.crop_scale)
|
chaospy/distributions/collection/gamma.py
|
utsekaj42/chaospy
| 333 |
143091
|
"""Gamma distribution."""
import numpy
from scipy import special
from ..baseclass import SimpleDistribution, ShiftScaleDistribution
class gamma(SimpleDistribution):
    """Standardized gamma distribution kernel (shape ``a``, unit scale)."""
    def __init__(self, a=1):
        super(gamma, self).__init__(dict(a=a))
    def _pdf(self, x, a):
        # Density: x**(a-1) * exp(-x) / Gamma(a).
        return x**(a-1)*numpy.e**(-x)/special.gamma(a)
    def _cdf(self, x, a):
        # Regularized lower incomplete gamma function.
        return special.gammainc(a, x)
    def _ppf(self, q, a):
        # Quantile via the inverse regularized incomplete gamma function.
        return special.gammaincinv(a, q)
    def _mom(self, k, a):
        # Raw moment: E[X**k] = Gamma(a+k) / Gamma(a).
        return special.gamma(a+k)/special.gamma(a)
    def _ttr(self, n, a):
        # Three-terms recurrence coefficients for orthogonal polynomials.
        return 2.*n+a, n*n+n*(a-1)
    def _lower(self, a):
        # Support starts at zero.
        return 0.
    def _upper(self, a):
        # Practical upper bound: the quantile at probability 1 - 1e-14.
        return special.gammaincinv(a, 1-1e-14)
class Gamma(ShiftScaleDistribution):
    """
    Gamma distribution.
    Also an Erlang distribution when shape=k and scale=1./lamb.
    Args:
        shape (float, Distribution):
            Shape parameter. a>0.
        scale (float, Distribution):
            Scale parameter. scale!=0
        shift (float, Distribution):
            Location of the lower bound.
    Examples:
        >>> distribution = chaospy.Gamma(3, scale=0.5)
        >>> distribution
        Gamma(3, scale=0.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([ 0.   ,  0.768,  1.143,  1.553,  2.14 , 19.459])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.508, 0.531, 0.432, 0.254, 0.   ])
        >>> distribution.sample(4).round(3)
        array([1.683, 0.587, 3.152, 1.301])
        >>> distribution.mom(1).round(3)
        1.5
        >>> distribution.ttr([0, 1, 2, 3]).round(3)
        array([[1.5 , 2.5 , 3.5 , 4.5 ],
               [0.  , 0.75, 2.  , 3.75]])
    """
    def __init__(self, shape=1, scale=1, shift=0):
        # Wrap the raw `gamma` kernel with affine scale/shift transforms.
        super(Gamma, self).__init__(
            dist=gamma(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
class Exponential(ShiftScaleDistribution):
    R"""
    Exponential Probability Distribution
    Args:
        scale (float, Distribution):
            Scale parameter. scale!=0
        shift (float, Distribution):
            Location of the lower bound.
    Examples:
        >>> distribution = chaospy.Exponential()
        >>> distribution
        Exponential()
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([ 0.   ,  0.223,  0.511,  0.916,  1.609, 32.237])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([1. , 0.8, 0.6, 0.4, 0.2, 0. ])
        >>> distribution.sample(4).round(3)
        array([1.06 , 0.122, 3.001, 0.658])
        >>> distribution.mom(1).round(3)
        1.0
        >>> distribution.ttr([1, 2, 3]).round(3)
        array([[3., 5., 7.],
               [1., 4., 9.]])
    """
    def __init__(self, scale=1, shift=0):
        # Exponential is a gamma distribution with shape fixed at 1.
        super(Exponential, self).__init__(
            dist=gamma(1),
            scale=scale,
            shift=shift,
            repr_args=[],
        )
|
modules/dataset.py
|
MeowTheCat/esrgan-tf2
| 101 |
143094
|
<reponame>MeowTheCat/esrgan-tf2<filename>modules/dataset.py
import tensorflow as tf
def _parse_tfrecord(gt_size, scale, using_bin, using_flip, using_rot):
    """Build a closure that parses one tfrecord example into an augmented
    (lr_img, hr_img) training pair.

    When `using_bin` is set, the PNG bytes are stored inline in the record;
    otherwise the record stores file paths that are read at parse time.
    """
    def parse_tfrecord(tfrecord):
        if using_bin:
            feature_spec = {
                'image/img_name': tf.io.FixedLenFeature([], tf.string),
                'image/hr_encoded': tf.io.FixedLenFeature([], tf.string),
                'image/lr_encoded': tf.io.FixedLenFeature([], tf.string)}
            example = tf.io.parse_single_example(tfrecord, feature_spec)
            lr_img = tf.image.decode_png(example['image/lr_encoded'], channels=3)
            hr_img = tf.image.decode_png(example['image/hr_encoded'], channels=3)
        else:
            feature_spec = {
                'image/img_name': tf.io.FixedLenFeature([], tf.string),
                'image/hr_img_path': tf.io.FixedLenFeature([], tf.string),
                'image/lr_img_path': tf.io.FixedLenFeature([], tf.string)}
            example = tf.io.parse_single_example(tfrecord, feature_spec)
            hr_bytes = tf.io.read_file(example['image/hr_img_path'])
            lr_bytes = tf.io.read_file(example['image/lr_img_path'])
            lr_img = tf.image.decode_png(lr_bytes, channels=3)
            hr_img = tf.image.decode_png(hr_bytes, channels=3)
        # Apply random crop / flip / rotation and scale to [0, 1].
        return _transform_images(
            gt_size, scale, using_flip, using_rot)(lr_img, hr_img)
    return parse_tfrecord
def _transform_images(gt_size, scale, using_flip, using_rot):
    """Return a closure that randomly crops an aligned LR/HR image pair to
    (gt_size/scale, gt_size) patches, optionally applies random flip and
    rotation augmentation, and scales pixel values to [0, 1]."""
    def transform_images(lr_img, hr_img):
        lr_img_shape = tf.shape(lr_img)
        hr_img_shape = tf.shape(hr_img)
        # Target patch shapes; the channel count follows the inputs.
        gt_shape = (gt_size, gt_size, tf.shape(hr_img)[-1])
        lr_size = int(gt_size / scale)
        lr_shape = (lr_size, lr_size, tf.shape(lr_img)[-1])
        # Runtime shape checks: the HR image must fit the patch, and the
        # LR/HR pair must differ exactly by `scale` in the spatial dims.
        tf.Assert(
            tf.reduce_all(hr_img_shape >= gt_shape),
            ["Need hr_image.shape >= gt_size, got ", hr_img_shape, gt_shape])
        tf.Assert(
            tf.reduce_all(hr_img_shape[:-1] == lr_img_shape[:-1] * scale),
            ["Need hr_image.shape == lr_image.shape * scale, got ",
             hr_img_shape[:-1], lr_img_shape[:-1] * scale])
        tf.Assert(
            tf.reduce_all(hr_img_shape[-1] == lr_img_shape[-1]),
            ["Need hr_image.shape[-1] == lr_image.shape[-1]], got ",
             hr_img_shape[-1], lr_img_shape[-1]])
        # randomly crop
        # The channel component of `limit` is 1, so the modulo forces the
        # channel offset to 0 and only the spatial offsets vary.
        limit = lr_img_shape - lr_shape + 1
        offset = tf.random.uniform(tf.shape(lr_img_shape), dtype=tf.int32,
                                   maxval=tf.int32.max) % limit
        # The HR crop is taken at the LR offset scaled up by `scale`.
        lr_img = tf.slice(lr_img, offset, lr_shape)
        hr_img = tf.slice(hr_img, offset * scale, gt_shape)
        # randomly left-right flip
        if using_flip:
            flip_case = tf.random.uniform([1], 0, 2, dtype=tf.int32)
            def flip_func(): return (tf.image.flip_left_right(lr_img),
                                     tf.image.flip_left_right(hr_img))
            lr_img, hr_img = tf.case(
                [(tf.equal(flip_case, 0), flip_func)],
                default=lambda: (lr_img, hr_img))
        # randomly rotation
        # One of four cases: 90, 180, 270 degrees, or no rotation (default).
        if using_rot:
            rot_case = tf.random.uniform([1], 0, 4, dtype=tf.int32)
            def rot90_func(): return (tf.image.rot90(lr_img, k=1),
                                      tf.image.rot90(hr_img, k=1))
            def rot180_func(): return (tf.image.rot90(lr_img, k=2),
                                       tf.image.rot90(hr_img, k=2))
            def rot270_func(): return (tf.image.rot90(lr_img, k=3),
                                       tf.image.rot90(hr_img, k=3))
            lr_img, hr_img = tf.case(
                [(tf.equal(rot_case, 0), rot90_func),
                 (tf.equal(rot_case, 1), rot180_func),
                 (tf.equal(rot_case, 2), rot270_func)],
                default=lambda: (lr_img, hr_img))
        # scale to [0, 1]
        lr_img = lr_img / 255
        hr_img = hr_img / 255
        return lr_img, hr_img
    return transform_images
def load_tfrecord_dataset(tfrecord_name, batch_size, gt_size,
                          scale, using_bin=False, using_flip=False,
                          using_rot=False, shuffle=True, buffer_size=10240):
    """Build a repeated, optionally shuffled, parsed, batched and prefetched
    dataset from a tfrecord file."""
    dataset = tf.data.TFRecordDataset(tfrecord_name).repeat()
    if shuffle:
        dataset = dataset.shuffle(buffer_size=buffer_size)
    parse_fn = _parse_tfrecord(gt_size, scale, using_bin, using_flip, using_rot)
    dataset = dataset.map(parse_fn,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
|
examples/script.py
|
jack1142/zxpy
| 418 |
143100
|
<gh_stars>100-1000
#! /usr/bin/env zxpy
# zxpy script: the interpreter rewrites the ~'...' syntax so the string is
# executed as a shell command; used as an expression it captures stdout.
~'echo Hello world!'
def print_file_count():
    # Count directory entries via the shell, then print a labeled result.
    file_count = ~'ls -1 | wc -l'
    ~"echo -n 'file count is: '"
    print(file_count)
print_file_count()
|
src/deep/custom_layers.py
|
DataForces/CV_LUNA
| 207 |
143117
|
import theano
import theano.tensor as T
import lasagne as nn
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
class SpatialDropoutLayer(nn.layers.Layer):
    """Spatial dropout layer
    Sets whole filter activations to zero with probability p. See notes for
    disabling dropout during testing.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape
    p : float or scalar tensor
        The probability of setting a value to zero
    rescale : bool
        If true the input is rescaled with input / (1-p) when deterministic
        is False.
    Notes
    -----
    The spatial dropout layer is a regularizer that randomly sets whole the
    values of whole features to zero. This is an adaptation of normal dropout,
    which is generally useful in fully convolutional settings, such as [1]_.
    It is also called a feature dropout layer.
    During training you should set deterministic to false and during
    testing you should set deterministic to true.
    If rescale is true the input is scaled with input / (1-p) when
    deterministic is false, see references for further discussion. Note that
    this implementation scales the input at training time.
    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., <NAME>. (2016):
           Deep Learning for Human Part Discovery in Images. IEEE
           International Conference on Robotics and Automation (ICRA), IEEE,
           2016.
    """
    def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
        super(SpatialDropoutLayer, self).__init__(incoming, **kwargs)
        # BUG FIX: `get_rng` was unresolved given the module imports; lasagne
        # exposes its shared RNG as nn.random.get_rng (likewise, the base
        # class `Layer` lives at nn.layers.Layer).
        self._srng = RandomStreams(nn.random.get_rng().randint(1, 2147462579))
        self.p = p
        self.rescale = rescale
    def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If true dropout and scaling is disabled, see notes
        """
        if deterministic or self.p == 0:
            return input
        else:
            # Using theano constant to prevent upcasting
            one = T.constant(1)
            retain_prob = one - self.p
            if self.rescale:
                input /= retain_prob
            # BUG FIX: the original referenced a module-level `_srng` which
            # does not exist; the RNG is stored on the instance.
            # Draw one Bernoulli sample per (batch, channel) pair so whole
            # feature maps are dropped together.
            mask = self._srng.binomial(input.shape[:2], p=retain_prob,
                                       dtype=theano.config.floatX)
            # Broadcast the (batch, channels) mask across the spatial dims.
            axes = [0, 1] + (['x'] * (input.ndim - 2))
            mask = mask.dimshuffle(*axes)
            return input * mask
|
qcodes_contrib_drivers/drivers/ZurichInstruments/__init__.py
|
ThorstenGroh/Qcodes_contrib_drivers
| 223 |
143139
|
<filename>qcodes_contrib_drivers/drivers/ZurichInstruments/__init__.py
# empty __init__ file
|
cocos/scientific/kde.py
|
michaelnowotny/cocos
| 101 |
143154
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Copyright 2004-2005 by Enthought, Inc.
#
# The code has been adapted by <NAME> to work with GPUs
# using Cocos from the SciPy code available at
# https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
#
# The open source license of the original code is reproduced below:
#
# Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from cached_property import cached_property
import numbers
import typing as tp
import warnings
from cocos.multi_processing.device_pool import ComputeDevicePool
from cocos.multi_processing.single_device_batch_processing import map_combine_single_device
from cocos.multi_processing.utilities import generate_slices_with_number_of_batches
import cocos.numerics as cn
from cocos.numerics.data_types import NumericArray
import cocos.device as cd
from cocos.numerics.numerical_package_selector import select_num_pack
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
# # from . import mvn
# from scipy.stats import mvn
# from ._stats import gaussian_kernel_estimate
#
# from scipy.stats._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
def _split_points_into_batches(points: NumericArray,
                               number_of_points_per_batch: int) \
        -> tp.List[tp.List[NumericArray]]:
    """Slice a (d, n) array of points along its second axis into argument
    lists of at most `number_of_points_per_batch` columns each."""
    number_of_points = points.shape[1]
    return [
        [points[:, start:min(start + number_of_points_per_batch,
                             number_of_points)]]
        for start in range(0, number_of_points, number_of_points_per_batch)
    ]
def _check_array_at_right_location_and_convert(array,
                                               gpu: bool,
                                               dtype: np.generic = np.float32):
    """Move `array` to the requested device (cocos array when `gpu`, NumPy
    array otherwise) and cast it to `dtype` if it is not already."""
    if gpu and isinstance(array, np.ndarray):
        array = cn.array(array)
    if not gpu and isinstance(array, cn.ndarray):
        array = np.array(array)
    if array.dtype != dtype:
        array = array.astype(dtype)
    return array
def ensure_consistent_numeric_arrays(arrays: tp.Iterable[tp.Optional[NumericArray]],
                                     gpu: bool,
                                     dtype: np.generic = np.float32):
    """Normalize device and dtype of every array, passing None through."""
    converted = []
    for array in arrays:
        if array is None:
            converted.append(None)
        else:
            converted.append(
                _check_array_at_right_location_and_convert(array=array,
                                                           gpu=gpu,
                                                           dtype=dtype))
    return tuple(converted)
def _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points: NumericArray,
                                                                         values: NumericArray,
                                                                         xi: NumericArray) \
        -> tp.Tuple[int, int, int]:
    """Validate the shapes of data points, values, and evaluation points.

    Returns (n, m, d): the number of data points, the number of evaluation
    points, and the data dimension. Raises ValueError when values has more
    than one column or xi's trailing dimension does not match the data.
    """
    n = points.shape[0]
    d = points.shape[1] if points.ndim > 1 else 1
    m = xi.shape[0]
    p = values.shape[1] if values.ndim > 1 else 1
    if p != 1:
        raise ValueError('p != 1 is not supported')
    if xi.shape[1] != d:
        raise ValueError(f"points and xi must have same trailing dim but the shape of xi is {xi.shape}")
    return n, m, d
def gaussian_kernel_estimate_vectorized_whitened(whitening: NumericArray,
                                                 whitened_points: NumericArray,
                                                 values: NumericArray,
                                                 xi: NumericArray,
                                                 norm: float,
                                                 dtype: np.generic,
                                                 gpu: bool) -> NumericArray:
    """Evaluate a Gaussian kernel estimate from pre-whitened data points.

    Args:
        whitening: (d, d) whitening transform (Cholesky factor of the
            precision matrix); applied here to the evaluation points only.
        whitened_points: (n, d) data points already multiplied by `whitening`.
        values: (n, 1) weights associated with the data points.
        xi: (m, d) evaluation points in original (un-whitened) coordinates.
        norm: kernel normalization constant.
        dtype: dtype used for the computation.
        gpu: whether to evaluate on the GPU (via cocos) or the CPU (NumPy).

    Returns:
        Kernel density estimates at the m evaluation points.
    """
    n, m, d = \
        _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points=whitened_points,
                                                                             values=values,
                                                                             xi=xi)
    # Move all arrays to the requested device (and default dtype).
    whitened_points, values, xi, whitening = \
        ensure_consistent_numeric_arrays((whitened_points, values, xi, whitening), gpu)
    num_pack = select_num_pack(gpu)
    whitened_points = whitened_points.astype(dtype, copy=False)
    # Whiten the evaluation points so distances match the whitened data.
    whitened_xi = num_pack.dot(xi, whitening).astype(dtype, copy=False)
    values = values.astype(dtype, copy=False)
    # Create the result array and evaluate the weighted sum
    # Broadcast (n, 1, d) against (1, m, d) to get all pairwise residuals.
    whitened_points = whitened_points.reshape((n, 1, d))
    whitened_xi = whitened_xi.reshape((1, m, d))
    residual = whitened_points - whitened_xi
    arg = residual * residual
    # Free the large (n, m, d) intermediate as early as possible.
    del residual
    if d > 1:
        assert arg.shape == (n, m, d)
        # Sum squared residuals over the dimension axis.
        arg = num_pack.sum(arg, axis=2)
    else:
        arg = arg.reshape((n, m))
    if not gpu:
        assert arg.shape == (n, m)
    arg = num_pack.exp(- 0.5 * arg) * norm
    if not gpu:
        assert arg.shape == (n, m)
    # estimate = num_pack.dot(arg.T, values)
    # Weighted sum over the n data points; equivalent to arg.T @ values.
    estimate = (values * arg).sum(axis=0)
    if estimate.ndim > 1:
        estimate = estimate.squeeze()
    if gpu:
        # Block until the asynchronous GPU computation has completed.
        cd.sync()
    return estimate
def gaussian_kernel_estimate_vectorized(points: NumericArray,
                                        values: NumericArray,
                                        xi: NumericArray,
                                        precision: NumericArray,
                                        dtype: np.generic,
                                        gpu: bool = False) \
        -> NumericArray:
    """
    Evaluate a multivariate Gaussian kernel estimate.

    Whitens the data with the Cholesky factor of the precision matrix, then
    delegates to `gaussian_kernel_estimate_vectorized_whitened`.

    Parameters
    ----------
    points : array_like with shape (n, d)
        Data points to estimate from in d dimensions.
    values : real[:, :] with shape (n, p)
        Multivariate values associated with the data points.
    xi : array_like with shape (m, d)
        Coordinates to evaluate the estimate at in d dimensions.
    precision : array_like with shape (d, d)
        Precision matrix for the Gaussian kernel.
    dtype : the result dtype
    gpu : whether to compute the gaussian kernel estimate on the gpu

    Returns
    -------
    estimate : double[:, :] with shape (m, p)
        Multivariate Gaussian kernel estimate evaluated at the input
        coordinates.
    """
    num_pack = select_num_pack(gpu)

    # Shape validation; only the dimension d is needed below.
    n, m, d = \
        _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points=points,
                                                                             values=values,
                                                                             xi=xi)

    # Move all inputs to the requested device.
    points, values, xi, precision = \
        ensure_consistent_numeric_arrays((points, values, xi, precision), gpu)

    # Whiten the data points with the Cholesky factor of the precision matrix
    # (the evaluation points are whitened by the callee).
    whitening = num_pack.linalg.cholesky(precision).astype(dtype, copy=False)
    points = num_pack.dot(points, whitening).astype(dtype, copy=False)
    values = values.astype(dtype, copy=False)

    # Kernel normalization constant: (2*pi)^(-d/2) * det(whitening).
    norm = (2 * np.pi) ** (- d / 2) * num_pack.prod(num_pack.diag(whitening))

    return gaussian_kernel_estimate_vectorized_whitened(whitening=whitening,
                                                        whitened_points=points,
                                                        xi=xi,
                                                        values=values,
                                                        norm=norm,
                                                        dtype=dtype,
                                                        gpu=gpu)
def gaussian_kernel_estimate(points, values, xi, precision, dtype):
    """
    Evaluate a multivariate Gaussian kernel estimate (reference loop version).

    Parameters
    ----------
    points : array_like with shape (n, d)
        Data points to estimate from in d dimensions.
    values : real[:, :] with shape (n, p)
        Multivariate values associated with the data points.
    xi : array_like with shape (m, d)
        Coordinates to evaluate the estimate at in d dimensions.
    precision : array_like with shape (d, d)
        Precision matrix for the Gaussian kernel.
    dtype : dtype of the computation and the returned estimate.

    Returns
    -------
    estimate : double[:, :] with shape (m, p)
        Multivariate Gaussian kernel estimate evaluated at the input
        coordinates.
    """
    n, d = points.shape[0], points.shape[1]
    m = xi.shape[0]
    p = values.shape[1]

    if p != 1:
        raise ValueError('p != 1 is not supported')
    if xi.shape[1] != d:
        raise ValueError("points and xi must have same trailing dim")
    if precision.shape[0] != d or precision.shape[1] != d:
        raise ValueError("precision matrix must match data dims")

    # Whiten data and evaluation points with the Cholesky factor.
    whitening = np.linalg.cholesky(precision).astype(dtype, copy=False)
    white_points = np.dot(points, whitening).astype(dtype, copy=False)
    white_xi = np.dot(xi, whitening).astype(dtype, copy=False)
    white_values = values.astype(dtype, copy=False)

    # Normalization constant: (2*pi)^(-d/2) times det of the whitening factor.
    norm = (2 * np.pi) ** (- d / 2)
    for k in range(d):
        norm *= whitening[k, k]

    # Accumulate the weighted kernel contribution of each data point at each
    # evaluation point.
    estimate = np.zeros((m, p), dtype)
    for i in range(n):
        for j in range(m):
            squared_distance = 0
            for k in range(d):
                difference = white_points[i, k] - white_xi[j, k]
                squared_distance += difference * difference
            weight = np.exp(-squared_distance / 2) * norm
            for k in range(p):
                estimate[j, k] += white_values[i, k] * weight
    return np.asarray(estimate)
@dataclass(frozen=True)
class GaussianKDEInformation:
    """Immutable summary of the data behind a Gaussian KDE, handed to
    bandwidth (covariance factor) functions."""
    points: np.ndarray  # (d, n) shaped array of datapoints
    weights: np.ndarray  # (d, n) shaped array of weights, optional
    dimension: int  # data dimension
    n: int  # number of data points
    neff: float  # effective sample size
CovarianceFactorFunctionType = tp.Callable[[GaussianKDEInformation], float]
SCOTTS_FACTOR_STRING = 'scotts'
SILVERMAN_FACTOR_STRING = 'silverman'
def compute_scotts_factor(kde_info: GaussianKDEInformation) -> float:
    """Scott's rule of thumb: neff ** (-1 / (d + 4))."""
    exponent = -1.0 / (kde_info.dimension + 4)
    return power(kde_info.neff, exponent)
def compute_silverman_factor(kde_info: GaussianKDEInformation) -> float:
    """Silverman's rule of thumb: (neff * (d + 2) / 4) ** (-1 / (d + 4))."""
    dimension = kde_info.dimension
    effective_n = kde_info.neff
    return power(effective_n * (dimension + 2.0) / 4.0,
                 -1.0 / (dimension + 4))
# class CovarianceFactor(ABC):
# @abstractmethod
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# pass
#
#
# class ScottsFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
#
#
# class SilvermanFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# d = kde_info.dimension
# neff = kde_info.neff
# return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
#
#
# class LambdaCovarianceFactor(CovarianceFactor):
# def __init__(self, covariance_factor_fun: tp.Callable[[GaussianKDEInformation], float]):
# self._covariance_factor_fun = covariance_factor_fun
#
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return self._covariance_factor_fun(kde_info)
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
gpu: whether to evaluate the kernel density estimate on the gpu
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] <NAME>, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] <NAME>, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] <NAME>, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] <NAME> and <NAME>, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] <NAME>., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
    def __init__(self,
                 dataset: NumericArray,
                 bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
                                                 str,
                                                 tp.Callable,
                                                 numbers.Number]] = None,
                 weights: tp.Optional[NumericArray] = None,
                 gpu: bool = False):
        # Numeric backend: cocos on the GPU, NumPy on the CPU.
        self._num_pack = select_num_pack(gpu)
        self._gpu = gpu
        # Dataset is kept as a (d, n) array: dims by observations.
        self.dataset = atleast_2d(asarray(dataset))
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.d, self.n = self.dataset.shape
        if weights is not None:
            weights = atleast_1d(weights).astype(float)
            # Normalize the weights so they sum to one.
            weights /= np.sum(weights)
            if weights.ndim != 1:
                raise ValueError("`weights` input should be one-dimensional.")
            if len(weights) != self.n:
                raise ValueError("`weights` input should be of length n")
            # Kish's effective sample size: 1 / sum(w_i^2) for normalized w.
            self._neff = 1.0/np.sum(weights**2)
        else:
            # Unweighted case: uniform weights; _neff is presumably derived
            # lazily elsewhere (not set here) — confirm against the class's
            # neff property.
            weights = ones(self.n) / self.n
        if gpu:
            # GPU computation is performed in single precision.
            dtype = np.float32
            weights = weights.astype(dtype)
            self.dataset = self.dataset.astype(dtype)
        self._weights = weights
        # Resolve the bandwidth method (string / scalar / callable) into a
        # covariance-factor function, then precompute covariance quantities.
        self._covariance_factor = \
            self._get_covariance_factor_function_from_bandwidth_type(bw_method)
        self._compute_covariance()
def _check_and_adjust_dimensions_of_points(self, points: np.ndarray) \
-> np.ndarray:
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
raise ValueError(f"points have dimension {d}, "
f"dataset has dimension {self.d}")
return points
def evaluate(self, points):
"""
Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = self._check_and_adjust_dimensions_of_points(points)
output_dtype = np.common_type(self.covariance, points)
if True:
# result = gaussian_kernel_estimate_vectorized(points=self.dataset.T,
# values=self.weights[:, None],
# xi=points.T,
# precision=self.inv_cov,
# dtype=output_dtype,
# gpu=self._gpu)
result = gaussian_kernel_estimate_vectorized_whitened(
whitening=self.whitening,
whitened_points=self.whitened_points,
values=self.weights[:, None],
xi=points.T,
norm=self.normalization_constant,
dtype=output_dtype,
gpu=self._gpu)
return result
else:
result = gaussian_kernel_estimate(points=self.dataset.T,
values=self.weights[:, None],
xi=points.T,
precision=self.inv_cov,
dtype=output_dtype)
return result[:, 0]
__call__ = evaluate
def evaluate_in_batches(self,
points: NumericArray,
maximum_number_of_elements_per_batch: int) \
-> np.ndarray:
"""
Evaluates a Gaussian KDE in batches and stores the results in main memory.
Args:
points:
numeric array with shape (d, m) containing the points at which to evaluate the kernel
density estimate
maximum_number_of_elements_per_batch:
maximum number of data points times evaluation points to process in a single batch
Returns:
a m-dimensional NumPy array of kernel density estimates
"""
points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
args_list = _split_points_into_batches(points, points_per_batch)
result = \
map_combine_single_device(f=self.evaluate,
combination=lambda x: np.hstack(x),
args_list=args_list)
return result
    def evaluate_in_batches_on_multiple_devices(self,
                                                points: NumericArray,
                                                maximum_number_of_elements_per_batch: int,
                                                compute_device_pool: ComputeDevicePool) \
            -> np.ndarray:
        """
        Evaluates a Gaussian KDE in batches on multiple gpus and stores the results in main memory.

        The evaluation points are first sliced across the devices in the
        pool; each device then processes its slice batch-by-batch against
        the pre-whitened dataset.

        Args:
            points:
                numeric array with shape (d, m) containing the points at which to evaluate the kernel
                density estimate
            maximum_number_of_elements_per_batch:
                maximum number of data points times evaluation points to process in a single batch
            compute_device_pool:
                pool of compute devices across which the work is distributed

        Returns:
            a m-dimensional NumPy array of kernel density estimates
        """
        # The KDE itself must hold host (CPU) data; each device converts its
        # own slice on the fly.
        if self.gpu:
            raise ValueError('Multi GPU evaluation requires gaussian_kde.gpu = False.')

        points = self._check_and_adjust_dimensions_of_points(points)

        number_of_points = points.shape[1]

        # One outer slice of the evaluation points per device.
        args_list = []
        for begin_index, end_index in generate_slices_with_number_of_batches(number_of_points,
                                                                             compute_device_pool.number_of_devices):
            args_list.append([points[:, begin_index:end_index]])

        # points_per_device = math.floor(number_of_points / gpu_pool.number_of_devices)
        # args_list = _split_points_into_batches(points, points_per_device)

        kwargs_list = compute_device_pool.number_of_devices * \
                      [
                          {'maximum_number_of_elements_per_batch': maximum_number_of_elements_per_batch,
                           'n': self.n,
                           'd': self.d}
                      ]

        def f(points_internal,
              maximum_number_of_elements_per_batch: int,
              n: int,
              d: int):
            # Runs on one device: split this device's slice into batches and
            # evaluate each against the cached whitened representation.
            points_per_batch = math.floor(maximum_number_of_elements_per_batch / (n * d))
            args_list_internal = _split_points_into_batches(points_internal, points_per_batch)

            def f_internal(points_internal_internal):
                return gaussian_kernel_estimate_vectorized_whitened(
                    whitening=self.whitening,
                    whitened_points=self.whitened_points,
                    values=self.weights[:, None],
                    xi=points_internal_internal.T,
                    norm=self.normalization_constant,
                    dtype=np.float32,
                    gpu=True)

            result = \
                map_combine_single_device(f=f_internal,
                                          combination=lambda x: np.hstack(x),
                                          args_list=args_list_internal)

            return result

        result = \
            compute_device_pool.map_combine(f=f,
                                            combination=lambda x: np.hstack(x),
                                            args_list=args_list,
                                            kwargs_list=kwargs_list)

        return result
# def evaluate_in_batches_on_multiple_gpus(self,
# points: NumericArray,
# maximum_number_of_elements_per_batch: int,
# gpu_pool: ComputeDevicePool) \
# -> np.ndarray:
# """
# Evaluates a Gaussian KDE in batches on multiple gpus and stores the results in main memory.
#
# Args:
# points:
# numeric array with shape (d, m) containing the points at which to evaluate the kernel
# density estimate
# maximum_number_of_elements_per_batch:
# maximum number of data points times evaluation points to process in a single batch
#
# Returns:
# a m-dimensional NumPy array of kernel density estimates
# """
# if self.gpu:
# raise ValueError('Multi GPU evaluation requires gaussian_kde.gpu = False.')
#
# points = self._check_and_adjust_dimensions_of_points(points)
#
# # number_of_points = points.shape[1]
# points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
#
# args_list = _split_points_into_batches(points, points_per_batch)
#
# def f(x):
# result = gaussian_kernel_estimate_vectorized_whitened(
# whitening=self.whitening,
# whitened_points=self.whitened_points,
# values=self.weights[:, None],
# xi=x.T,
# norm=self.normalization_constant,
# dtype=np.float32,
# gpu=True)
#
# return result
#
# result = \
# gpu_pool.map_combine(f=f,
# combination=lambda x: np.hstack(x),
# args_list=args_list)
#
# return result
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : aray_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
# def integrate_box(self, low_bounds, high_bounds, maxpts=None):
# """Computes the integral of a pdf over a rectangular interval.
#
# Parameters
# ----------
# low_bounds : array_like
# A 1-D array containing the lower bounds of integration.
# high_bounds : array_like
# A 1-D array containing the upper bounds of integration.
# maxpts : int, optional
# The maximum number of points to use for integration.
#
# Returns
# -------
# value : scalar
# The result of the integral.
#
# """
# if maxpts is not None:
# extra_kwds = {'maxpts': maxpts}
# else:
# extra_kwds = {}
#
# value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
# self.dataset, self.weights,
# self.covariance, **extra_kwds)
# if inform:
# msg = ('An integral in mvn.mvnun requires more points than %s' %
# (self.d * 1000))
# warnings.warn(msg)
#
# return value
    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.
        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        # The product of two Gaussian kernels integrates to a Gaussian with
        # summed covariances, evaluated at the difference of the centers.
        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            # Weighted cross-term of kernel i against all of `large`'s kernels.
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]

        # sqrt(det(sum_cov)) via the Cholesky factor's diagonal.
        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const

        return result
def resample(self, size=None, seed=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
This parameter defines the object to use for drawing random
variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
Specify `seed` for reproducible drawing of random variates.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
@staticmethod
def _get_covariance_factor_function_from_bandwidth_type(
bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
str,
tp.Callable,
numbers.Number]] = None) \
-> CovarianceFactorFunctionType:
"""
Infers the bandwidth selection method from
Args:
bw_method: either 'scotts' or 'silverman' or a scalar or a function returning a float
Returns:
covariance factor function
"""
if bw_method is None:
return compute_scotts_factor
elif isinstance(bw_method, str):
if bw_method == SCOTTS_FACTOR_STRING:
return compute_scotts_factor
elif bw_method == SILVERMAN_FACTOR_STRING:
return compute_silverman_factor
else:
raise ValueError(f'bw_method={bw_method} is not supported')
elif callable(bw_method):
return bw_method
elif np.isscalar(bw_method):
return lambda kde_info: bw_method
else:
raise ValueError(f'bw_method {bw_method} is not supported')
    def _compute_covariance(self):
        """
        Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        kde_info = GaussianKDEInformation(dimension=self.d,
                                          n=self.n,
                                          neff=self.neff,
                                          points=self.dataset,
                                          weights=self.weights)

        self.factor = self._covariance_factor(kde_info)

        # Cache covariance and inverse covariance of the data; recomputing
        # them is skipped on subsequent calls (only the factor changes).
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = \
                atleast_2d(cov(self.dataset,
                               rowvar=True,
                               bias=False,
                               aweights=self.weights))
            self._data_inv_cov = linalg.inv(self._data_covariance)

        # Kernel covariance = data covariance scaled by the squared bandwidth.
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        # Gaussian normalization constant sqrt(det(2*pi*Sigma)).
        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
    def pdf(self, x: np.ndarray) -> NumericArray:
        """
        Evaluate the estimated pdf on a provided set of points.

        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
        docstring for more details.
        """
        return self.evaluate(x)
def logpdf(self, x: np.ndarray) -> np.ndarray:
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
if m >= self.n:
# there are more points than data, so loop over data
energy = zeros((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff, axis=0) / 2.0
result = logsumexp(-energy.T,
b=self.weights / self._norm_factor, axis=1)
else:
# loop over points
result = zeros((m,), dtype=float)
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = logsumexp(-energy, b=self.weights /
self._norm_factor)
return result
    @property
    def gpu(self) -> bool:
        # True when this KDE was constructed for GPU (float32) evaluation.
        return self._gpu
    @property
    def weights(self) -> np.ndarray:
        # Per-sample weights, normalized in __init__ to sum to one.
        return self._weights
    @cached_property
    def neff(self) -> float:
        # Effective sample size 1 / sum(w_i^2); equals n for uniform weights.
        return 1.0/np.sum(self.weights*self.weights)
    @cached_property
    def whitening(self) -> NumericArray:
        # Cholesky factor of the precision (inverse covariance) matrix,
        # computed with the backend selected by the gpu flag.
        gpu = self.gpu
        num_pack = select_num_pack(gpu)
        precision = \
            ensure_consistent_numeric_arrays((self.inv_cov, ), gpu)[0]
        return num_pack.linalg.cholesky(precision)
    @cached_property
    def whitened_points(self) -> NumericArray:
        # Dataset (as rows) multiplied by the whitening factor; in this
        # space the kernel covariance is the identity.
        gpu = self.gpu
        num_pack = select_num_pack(gpu)
        points = \
            ensure_consistent_numeric_arrays((self.dataset.T, ), gpu)[0]
        return num_pack.dot(points, self.whitening)
    @cached_property
    def normalization_constant(self) -> float:
        # Gaussian normalization in whitened coordinates:
        # (2*pi)^(-d/2) * prod(diag(L)), with L the whitening factor.
        gpu = self.gpu
        num_pack = select_num_pack(gpu)
        return (2 * np.pi) ** (- self.d / 2) * num_pack.prod(num_pack.diag(self.whitening))
# def evaluate_gaussian_kde_in_batches(kde: gaussian_kde,
# points: NumericArray,
# maximum_number_of_elements_per_batch: int) \
# -> np.ndarray:
# """
# Evaluates a Gaussian KDE in batches and stores the results in main memory.
#
# Args:
# kde: a gaussian_kde object
# points:
# numeric array with shape (d, m) containing the points at which to evaluate the kernel
# density estimate
# maximum_number_of_elements_per_batch:
# maximum number of data points times evaluation points to process in a single batch
#
# Returns:
# a m-dimensional NumPy array of kernel density estimates
# """
# number_of_points = points.shape[1]
# points_per_batch = math.floor(maximum_number_of_elements_per_batch / (kde.n * kde.d))
#
# n_begin = 0
#
# output_array = np.zeros((number_of_points, ), dtype=points.dtype)
#
# while n_begin < number_of_points:
# n_end = min(n_begin + points_per_batch, number_of_points)
# output_array[n_begin:n_end] = np.array(kde.evaluate(points[:, n_begin:n_end]))
# n_begin = n_end
#
# return output_array
def evaluate_gaussian_kde_in_batches(kde: gaussian_kde,
                                     points: NumericArray,
                                     maximum_number_of_elements_per_batch: int) \
        -> np.ndarray:
    """
    Evaluates a Gaussian KDE in batches and stores the results in main memory.

    Args:
        kde: a gaussian_kde object
        points:
            numeric array with shape (d, m) containing the points at which to
            evaluate the kernel density estimate
        maximum_number_of_elements_per_batch:
            maximum number of data points times evaluation points to process
            in a single batch

    Returns:
        a m-dimensional NumPy array of kernel density estimates
    """
    # Cap (dataset size) x (points per batch) at the requested element count.
    batch_size = \
        math.floor(maximum_number_of_elements_per_batch / (kde.n * kde.d))

    return map_combine_single_device(
        f=kde.evaluate,
        combination=lambda chunks: np.hstack(chunks),
        args_list=_split_points_into_batches(points, batch_size))
|
WebMirror/management/rss_parser_funcs/feed_parse_extractLasciviousImouto.py
|
fake-name/ReadableWebProxy
| 193 |
143168
|
def extractLasciviousImouto(item):
    """Map a 'Lascivious Imouto' feed item to a release message.

    Returns None for previews and non-chapter posts, a release message for
    recognized series, and False when the item matches no known series.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title.replace('-', '.'))
    if 'preview' in title.lower() or not (vol or chp):
        return None
    tags = item['tags']
    if ('The Beast of the 17th District' in tags
            or 'the beast of the 17th district' in title.lower()):
        return buildReleaseMessageWithType(item, 'The Beast of the 17th District',
                                           vol, chp, frag=frag, postfix=postfix,
                                           tl_type='oel')
    if 'Le Festin de Vampire' in tags:
        return buildReleaseMessageWithType(item, 'Le Festin de Vampire',
                                           vol, chp, frag=frag, postfix=postfix)
    return False
|
samples/python/int8_caffe_mnist/calibrator.py
|
martellz/TensorRT
| 5,249 |
143178
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import Image
import numpy as np
# Returns a numpy buffer of shape (num_images, 1, 28, 28)
def load_mnist_data(filepath):
    """Load an IDX3 (MNIST image) file into a float32 array.

    Args:
        filepath: path to an IDX3 image file (big-endian int32 header
            followed by uint8 pixels).

    Returns:
        A contiguous float32 array of shape (num_images, 1, height, width)
        with pixel values scaled to [0, 1].

    Raises:
        AssertionError: if the file's magic number is not 2051.
    """
    with open(filepath, "rb") as f:
        # np.fromstring is deprecated for binary data (removed in NumPy 2.x);
        # np.frombuffer is the supported equivalent.
        raw_buf = np.frombuffer(f.read(), dtype=np.uint8)
    # Make sure the magic number is what we expect
    assert raw_buf[0:4].view(">i4")[0] == 2051
    num_images = raw_buf[4:8].view(">i4")[0]
    image_c = 1
    image_h = raw_buf[8:12].view(">i4")[0]
    image_w = raw_buf[12:16].view(">i4")[0]
    # Need to scale all values to the range of [0, 1]
    return np.ascontiguousarray((raw_buf[16:] / 255.0).astype(np.float32).reshape(num_images, image_c, image_h, image_w))
# Returns a numpy buffer of shape (num_images)
def load_mnist_labels(filepath):
    """Load an IDX1 (MNIST label) file into an int32 array.

    Args:
        filepath: path to an IDX1 label file (big-endian int32 header
            followed by uint8 labels).

    Returns:
        A contiguous int32 array of shape (num_labels,).

    Raises:
        AssertionError: if the file's magic number is not 2049.
    """
    with open(filepath, "rb") as f:
        # np.fromstring is deprecated for binary data (removed in NumPy 2.x);
        # np.frombuffer is the supported equivalent.
        raw_buf = np.frombuffer(f.read(), dtype=np.uint8)
    # Make sure the magic number is what we expect
    assert raw_buf[0:4].view(">i4")[0] == 2049
    num_labels = raw_buf[4:8].view(">i4")[0]
    return np.ascontiguousarray(raw_buf[8:].astype(np.int32).reshape(num_labels))
class MNISTEntropyCalibrator(trt.IInt8EntropyCalibrator2):
    """INT8 entropy calibrator that feeds MNIST batches from device memory."""

    def __init__(self, training_data, cache_file, batch_size=64):
        # Whenever you specify a custom constructor for a TensorRT class,
        # you MUST call the constructor of the parent explicitly.
        trt.IInt8EntropyCalibrator2.__init__(self)

        self.cache_file = cache_file

        # Every time get_batch is called, the next batch of size batch_size will be copied to the device and returned.
        self.data = load_mnist_data(training_data)
        self.batch_size = batch_size
        self.current_index = 0

        # Allocate enough memory for a whole batch (reused on every call).
        self.device_input = cuda.mem_alloc(self.data[0].nbytes * self.batch_size)

    def get_batch_size(self):
        # Batch size TensorRT should expect from each get_batch() call.
        return self.batch_size

    # TensorRT passes along the names of the engine bindings to the get_batch function.
    # You don't necessarily have to use them, but they can be useful to understand the order of
    # the inputs. The bindings list is expected to have the same ordering as 'names'.
    def get_batch(self, names):
        # Returning None signals to TensorRT that calibration data is exhausted.
        if self.current_index + self.batch_size > self.data.shape[0]:
            return None

        current_batch = int(self.current_index / self.batch_size)
        if current_batch % 10 == 0:
            print("Calibrating batch {:}, containing {:} images".format(current_batch, self.batch_size))

        # Flatten the batch and copy it host-to-device into the reusable buffer.
        batch = self.data[self.current_index:self.current_index + self.batch_size].ravel()
        cuda.memcpy_htod(self.device_input, batch)
        self.current_index += self.batch_size
        return [self.device_input]

    def read_calibration_cache(self):
        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()

    def write_calibration_cache(self, cache):
        # Persist the calibration table so later builds can skip calibration.
        with open(self.cache_file, "wb") as f:
            f.write(cache)
|
src/test/test_source_merge.py
|
alvarosg/covid-19-open-data
| 430 |
143198
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import main
from pandas import DataFrame
from lib.data_source import DataSource
from .profiled_test_case import ProfiledTestCase
# Synthetic data used for testing
TEST_AUX_DATA = DataFrame.from_records(
[
# Country with no subregions
{
"key": "AA",
"country_code": "AA",
"subregion1_code": None,
"subregion2_code": None,
"match_string": None,
},
# Country with one level-1 subregion
{
"key": "AB",
"country_code": "AB",
"subregion1_code": None,
"subregion2_code": None,
"match_string": None,
},
{
"key": "AB_1",
"country_code": "AB",
"subregion1_code": "1",
"subregion2_code": None,
"match_string": None,
},
# Country with five level-1 subregions
{
"key": "AC",
"country_code": "AC",
"subregion1_code": None,
"subregion2_code": None,
"match_string": None,
},
{
"key": "AC_1",
"country_code": "AC",
"subregion1_code": "1",
"subregion2_code": None,
"match_string": None,
},
{
"key": "AC_2",
"country_code": "AC",
"subregion1_code": "2",
"subregion2_code": None,
"match_string": None,
},
{
"key": "AC_3",
"country_code": "AC",
"subregion1_code": "3",
"subregion2_code": None,
"match_string": None,
},
{
"key": "AC_4",
"country_code": "AC",
"subregion1_code": "4",
"subregion2_code": None,
"match_string": None,
},
{
"key": "AC_5",
"country_code": "AC",
"subregion1_code": "5",
"subregion2_code": None,
"match_string": None,
},
# Country with one level-1 subregion and one level-2 subregion
{
"key": "AD",
"country_code": "AD",
"subregion1_code": None,
"subregion2_code": None,
"match_string": None,
},
{
"key": "AD_1",
"country_code": "AD",
"subregion1_code": "1",
"subregion2_code": None,
"match_string": None,
},
{
"key": "AD_1_1",
"country_code": "AD",
"subregion1_code": "1",
"subregion2_code": "1",
"match_string": None,
},
# Country with one level-1 subregion and five level-2 subregions
{
"key": "AE",
"country_code": "AE",
"subregion1_code": None,
"subregion2_code": None,
"match_string": None,
},
{
"key": "AE_1",
"country_code": "AE",
"subregion1_code": "1",
"subregion2_code": None,
"match_string": None,
},
{
"key": "AE_1_1",
"country_code": "AE",
"subregion1_code": "1",
"subregion2_code": "1",
"match_string": None,
},
{
"key": "AE_1_2",
"country_code": "AE",
"subregion1_code": "1",
"subregion2_code": "2",
"match_string": None,
},
{
"key": "AE_1_3",
"country_code": "AE",
"subregion1_code": "1",
"subregion2_code": "3",
"match_string": None,
},
{
"key": "AE_1_4",
"country_code": "AE",
"subregion1_code": "1",
"subregion2_code": "4",
"match_string": None,
},
{
"key": "AE_1_5",
"country_code": "AE",
"subregion1_code": "1",
"subregion2_code": "5",
"match_string": None,
},
]
)
TEST_METADATA_KEYS = set(TEST_AUX_DATA["key"].values)
class TestSourceMerge(ProfiledTestCase):
    """Tests for DataSource.merge key resolution against synthetic metadata."""

    def test_merge_no_match(self):
        # Unknown country code: no metadata row matches.
        aux = TEST_AUX_DATA.copy()
        data_source = DataSource()
        record = {"country_code": "__"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertTrue(key is None)

    def test_merge_by_key(self):
        # A record that already carries a key resolves to that key.
        aux = TEST_AUX_DATA.copy()
        data_source = DataSource()
        record = {"key": "AE_1_2"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, record["key"])

    def test_merge_zero_subregions(self):
        # Country with no subregions: country code alone is unambiguous.
        aux = TEST_AUX_DATA.copy()
        data_source = DataSource()
        record = {"country_code": "AA"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, "AA")

    def test_merge_one_subregion(self):
        aux = TEST_AUX_DATA.copy()
        data_source = DataSource()
        # Country code alone is ambiguous (country row vs. its subregion).
        record = {"country_code": "AB"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertTrue(key is None)
        # Explicit null subregion1_code selects the country-level row.
        record = {"country_code": "AB", "subregion1_code": None}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, "AB")
        # Explicit subregion1_code selects the subregion row.
        record = {"country_code": "AB", "subregion1_code": "1"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, "AB_1")

    def test_merge_null_vs_empty(self):
        aux = TEST_AUX_DATA.copy()
        data_source = DataSource()
        # Only one record has null region1_code
        record = {"country_code": "AD", "subregion1_code": None}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, "AD")
        # Empty means "do not compare" rather than "filter non-null"
        record = {"country_code": "AD", "subregion1_code": ""}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, None)
        # There are multiple records that fit this merge, so it's ambiguous
        record = {"country_code": "AD", "subregion1_code": "1"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, None)
        # Match fails because subregion1_code is not null
        record = {"country_code": "AD", "subregion1_code": None, "subregion2_code": "1"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, None)
        # Match is exact so the merge is unambiguous
        record = {"country_code": "AD", "subregion1_code": "1", "subregion2_code": "1"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, "AD_1_1")
        # Even though we don't have subregion1_code, there's only one record that matches
        record = {"country_code": "AD", "subregion1_code": "", "subregion2_code": "1"}
        key = data_source.merge(record, {"metadata": aux}, keys=TEST_METADATA_KEYS)
        self.assertEqual(key, "AD_1_1")
if __name__ == "__main__":
sys.exit(main())
|
notebook/pillow_rotate.py
|
vhn0912/python-snippets
| 174 |
143206
|
<filename>notebook/pillow_rotate.py<gh_stars>100-1000
from PIL import Image

im = Image.open('data/src/lena.jpg')

# (angle, rotate() keyword arguments, output file name) per demo case.
cases = [
    (90, {}, 'lena_rotate_90.jpg'),
    (45, {}, 'lena_rotate_45.jpg'),
    (45, {'resample': Image.BICUBIC}, 'lena_rotate_45_bicubic.jpg'),
    (90, {'expand': True}, 'lena_rotate_90_expand.jpg'),
    (45, {'expand': True}, 'lena_rotate_45_expand.jpg'),
    (45, {'center': (0, 60)}, 'lena_rotate_45_change_center.jpg'),
    (45, {'center': (0, 60), 'expand': True}, 'lena_rotate_45_change_center_expand.jpg'),
    (0, {'translate': (100, 50)}, 'lena_rotate_0_translate.jpg'),
    (45, {'translate': (100, 50)}, 'lena_rotate_45_translate.jpg'),
    (45, {'translate': (100, 50), 'expand': True}, 'lena_rotate_45_translate_expand.jpg'),
    (45, {'fillcolor': (255, 128, 0), 'expand': True}, 'lena_rotate_45_fillcolor_expand.jpg'),
]

# Save one rotated variant per case, demonstrating resample/expand/center/
# translate/fillcolor options of Image.rotate.
for angle, kwargs, filename in cases:
    im.rotate(angle, **kwargs).save('data/dst/' + filename, quality=95)
|
utils/wfuzzbasicauthbrute/wfuzz/framework/fuzzer/filter.py
|
ismailbozkurt/kubebot
| 171 |
143209
|
from framework.core.myexception import FuzzException
from threading import Thread
from framework.fuzzer.fuzzobjects import FuzzResult
from framework.utils.myqueue import FuzzQueue
PYPARSING = True
try:
from pyparsing import Word, Group, oneOf, Optional, Suppress, ZeroOrMore, Literal
from pyparsing import ParseException
except ImportError:
PYPARSING = False
class FilterQ(FuzzQueue):
    """Queue stage that decides whether each fuzz result is visible.

    Visibility is driven either by a pyparsing filter expression over the
    result's code/lines/words/chars (elements c, l, w, h) or by the
    show/hide code, line, word, char and regex parameters in ``ffilter``.
    The magic values XXX (request error) and BBB (baseline) are allowed in
    both mechanisms.
    """

    def __init__(self, ffilter, queue_out):
        FuzzQueue.__init__(self, queue_out)
        Thread.__init__(self)
        self.setName('filter_thread')

        self.queue_out = queue_out

        if PYPARSING:
            # Grammar for expressions such as "(c=200 and w>10) or l!=7".
            # X and B are legal "digits" so XXX/BBB parse as integers.
            element = oneOf("c l w h")
            digits = "XB0123456789"
            integer = Word( digits )#.setParseAction( self.__convertIntegers )
            elementRef = Group(element + oneOf("= != < > >= <=") + integer)
            operator = oneOf("and or")
            definition = elementRef + ZeroOrMore( operator + elementRef)
            nestedformula = Group(Suppress(Optional(Literal("("))) + definition + Suppress(Optional(Literal(")"))))
            self.finalformula = nestedformula + ZeroOrMore( operator + nestedformula)

            # Evaluate the expression during parsing via parse actions.
            elementRef.setParseAction(self.__compute_element)
            nestedformula.setParseAction(self.__compute_formula)
            self.finalformula.setParseAction(self.__myreduce)

        self.res = None
        self.hideparams = ffilter

        # XXX (error) results report code 0, so treat them as equivalent.
        if "XXX" in self.hideparams['codes']:
            self.hideparams['codes'].append("0")

        self.baseline = None

    def get_name(self):
        return 'filter_thread'

    def _cleanup(self):
        pass

    def process(self, prio, item):
        # The first baseline item defines the BBB reference values.
        if item.is_baseline:
            self.baseline = self._set_baseline_fuzz(item)

        item.is_visible = self.is_visible(item)
        self.send(item)

    def _set_baseline_fuzz(self, res):
        """Substitute the baseline's measurements for any BBB markers."""
        if "BBB" in self.hideparams['lines']:
            self.hideparams['lines'].append(str(res.lines))
        if "BBB" in self.hideparams['codes']:
            self.hideparams['codes'].append(str(res.code))
        if "BBB" in self.hideparams['words']:
            self.hideparams['words'].append(str(res.words))
        if "BBB" in self.hideparams['chars']:
            self.hideparams['chars'].append(str(res.chars))

        return res

    def __convertIntegers(self, tokens):
        return int(tokens[0])

    def __compute_element(self, tokens):
        """Evaluate one "<element> <op> <value>" comparison for self.res."""
        element, operator, value = tokens[0]

        if value == 'BBB' and self.baseline == None:
            raise FuzzException(FuzzException.FATAL, "FilterQ: specify a baseline value when using BBB")

        if element == 'c' and value == 'XXX':
            value = 0

        # Resolve BBB against the recorded baseline measurement.
        if value == 'BBB':
            if element == 'l':
                value = self.baseline.lines
            elif element == 'c':
                value = self.baseline.code
            elif element == 'w':
                value = self.baseline.words
            elif element == 'h':
                value = self.baseline.chars

        test = dict(w=self.res.words, c=self.res.code, l=self.res.lines, h=self.res.chars)
        value = int(value)

        if operator == "=":
            return test[element] == value
        elif operator == "<=":
            return test[element] <= value
        elif operator == ">=":
            return test[element] >= value
        elif operator == "<":
            return test[element] < value
        elif operator == ">":
            return test[element] > value
        elif operator == "!=":
            return test[element] != value

    def __myreduce(self, elements):
        # Left-to-right fold of booleans joined by and/or (no precedence).
        first = elements[0]
        for i in range(1, len(elements), 2):
            if elements[i] == "and":
                first = (first and elements[i+1])
            elif elements[i] == "or":
                first = (first or elements[i+1])

        return first

    def __compute_formula(self, tokens):
        return self.__myreduce(tokens[0])

    def is_visible(self, res):
        """Return True when `res` should be shown to the user."""
        # baseline is always shown
        if self.baseline and res.is_baseline == True:
            return True

        filter_string = self.hideparams['filter_string']
        if filter_string and PYPARSING:
            self.res = res
            try:
                return self.finalformula.parseString(filter_string)[0]
            # Fix: the old Python-2-only "except ParseException, e:" syntax
            # is a SyntaxError on Python 3; this form works on both.
            except ParseException:
                raise FuzzException(FuzzException.FATAL, "Incorrect filter expression. It should be composed of: c,l,w,h/and,or/=,<,>,!=,<=,>=")
        else:
            if self.baseline == None and ('BBB' in self.hideparams['codes'] \
                    or 'BBB' in self.hideparams['lines'] \
                    or 'BBB' in self.hideparams['words'] \
                    or 'BBB' in self.hideparams['chars']):
                raise FuzzException(FuzzException.FATAL, "FilterQ: specify a baseline value when using BBB")

            if self.hideparams['codes_show'] is None:
                cond1 = True
            else:
                cond1 = not self.hideparams['codes_show']

            if self.hideparams['regex_show'] is None:
                cond2 = True
            else:
                cond2 = not self.hideparams['regex_show']

            if str(res.code) in self.hideparams['codes'] \
                    or str(res.lines) in self.hideparams['lines'] \
                    or str(res.words) in self.hideparams['words'] \
                    or str(res.chars) in self.hideparams['chars']:
                cond1 = self.hideparams['codes_show']

            if self.hideparams['regex']:
                if self.hideparams['regex'].search(res.history.fr_content()):
                    cond2 = self.hideparams['regex_show']

            return (cond1 and cond2)
if __name__ == "__main__":
    # Ad-hoc smoke test for the filter grammar (Python 2 print syntax).
    tests = []
    tests.append("(w=200 and w=200) or w=200")
    tests.append("(w=400 and w=200) and (w=200 or w=200 or w=000)")
    tests.append("(w=200 and l=7) and (h=23)")
    tests.append("w=201")
    tests.append("w=200")

    # Stand-in result object with fixed measurements.
    class t:
        code = 200
        words = 200
        lines = 7
        chars = 23

    res = t()

    # NOTE(review): this demo looks broken — FilterQ() is called without its
    # required (ffilter, queue_out) arguments, and is_visible() takes a
    # single result argument, not (res, expression). Confirm before use.
    f = FilterQ()
    for i in tests:
        print "%s := %s" % (str(i), f.is_visible(res, i))
|
server/app/services/tasks_scheduler/timer_tasks/app/device_count/__init__.py
|
goodfree/ActorCloud
| 173 |
143212
|
<gh_stars>100-1000
from .count_task import device_count_task
__all__ = ['device_count_task']
|
easy_comment/handlers.py
|
hsyao/django_blog
| 137 |
143219
|
from django.db.models.signals import post_save
from .models import Comment
from notifications.signals import notify
from django.conf import settings
from django.apps import apps
from .tasks import email_handler
def get_recipient():
    """Return a queryset of admin users, matched by username against the
    names listed in ``settings.ADMINS``."""
    admin_names = [entry[0] for entry in settings.ADMINS]
    user_model = apps.get_model(*settings.AUTH_USER_MODEL.split('.'))
    return user_model.objects.filter(username__in=admin_names)
ADMINS = get_recipient()
SEND_NOTIFICATION_EMAIL = getattr(settings, 'SEND_NOTIFICATION_EMAIL', False)
def user2id(*args):
    """Map the given user objects to their primary-key ids, in order."""
    return [user.id for user in args]
def comment_handler(sender, instance, created, **kwargs):
    """post_save receiver for Comment: notify site admins and, for replies,
    the author of the parent comment.

    Notifications are sent through django-notifications' ``notify`` signal;
    e-mail delivery is delegated to the ``email_handler`` task when
    SEND_NOTIFICATION_EMAIL is enabled.  Only newly created comments are
    handled; edits are ignored.
    """
    if not created:
        return
    # Admins are notified, except the comment's own author.
    recipient = ADMINS.exclude(id=instance.user.id)
    if instance.parent is not None:  # idiom fix: was `not instance.parent is None`
        # Reply: also keep the parent's author out of the admin broadcast,
        # since they get a dedicated '@' notification below.
        recipient = recipient.exclude(id=instance.parent.user.id)
        if recipient.count() > 0:
            notify.send(instance.user, recipient=recipient,
                        verb='回复了 %s' % instance.parent.user_name,
                        action_object=instance,
                        target=instance.post,
                        description=instance.content)
            if SEND_NOTIFICATION_EMAIL:
                email_handler.delay(user2id(*recipient))
        # NOTE(review): authors are compared by user_name here but by id
        # above -- confirm these are equivalent for this model.
        if not instance.user_name == instance.parent.user_name:
            notify.send(instance.user, recipient=instance.parent.user, verb='@了你',
                        action_object=instance,
                        target=instance.post,
                        description=instance.content)
            if SEND_NOTIFICATION_EMAIL:
                email_handler.delay(user2id(instance.parent.user))
    else:
        # Top-level comment: plain broadcast to the remaining admins.
        if recipient.count() > 0:
            notify.send(instance.user, recipient=recipient, verb='发表了评论',
                        action_object=instance,
                        target=instance.post,
                        description=instance.content)
            if SEND_NOTIFICATION_EMAIL:
                email_handler.delay(user2id(*recipient))
post_save.connect(comment_handler, sender=Comment)
'''
def like_handler(sender, instance, created, **kwargs):
if created:
recipient = ADMINS.exclude(id=instance.user.id).exclude(id=instance.comment.user.id)
verb = '的评论' if instance.comment.parent is None else '的回复'
action = '赞了' if instance.status else '踩了'
if recipient.count() > 0:
notify.send(instance.user, recipient=recipient,
verb=action + instance.comment.user_name + verb,
action_object=instance.comment,
target=instance.comment.post,
description=instance.comment.content)
if (not instance.user.username == instance.comment.user_name) and instance.status:
notify.send(instance.user, recipient=instance.comment.user,
verb='赞了你' + verb,
action_object=instance.comment,
target=instance.comment.post,
description=instance.comment.content)
post_save.connect(like_handler, sender=Like)
'''
|
tests/test_models_artist.py
|
owlwang/spotify.py
| 157 |
143226
|
import asyncio
import unittest
from types import ModuleType
from common import *
class TestArtist(unittest.TestCase):
    """Exercises the Artist-related client calls for every URI in
    TEST_ARTISTS.

    Credentials, the client decorator and the async helpers are
    star-imported from the shared ``common`` test module.
    """

    @async_with_client(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET)
    async def test_artist(self, *, client):
        # Fetch each artist, then run all of its album/track endpoint
        # coroutines through async_chain (from common); the test passes
        # when none of them raise.
        for artist_uri in TEST_ARTISTS:
            artist = await client.get_artist(artist_uri)
            await async_chain([
                artist.get_albums(),
                artist.get_all_albums(),
                artist.total_albums(),
                artist.top_tracks(),
                artist.related_artists()
            ])
if __name__ == '__main__':
unittest.main()
|
gunnery/core/tests/test_modals.py
|
timgates42/gunnery
| 314 |
143243
|
from django.core.urlresolvers import reverse
from guardian.shortcuts import assign_perm, get_objects_for_user
from core.models import ServerRole
from core.tests.base import BaseModalTestCase, BaseModalTests, BaseForbiddenModalTests
from core.tests.fixtures import ServerRoleFactory, ApplicationFactory, EnvironmentFactory, ServerFactory
class ModalServerroleForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
    """Access-denial tests (inherited from BaseForbiddenModalTests) for the
    'serverrole' modal form."""
    url_params = {'form_name': 'serverrole'}
    object_factory = ServerRoleFactory
class ModalServerroleTest(BaseModalTestCase, BaseModalTests):
    """CRUD tests for the 'serverrole' modal, run as a department manager."""
    url_params = {'form_name': 'serverrole'}
    object_factory = ServerRoleFactory
    logged_is_manager = True

    def test_create(self):
        # Creation succeeds and asks the client to reload the page.
        response, obj = self._test_create({'name': 'ServerRoleName'})
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})

    def test_edit(self):
        # Editing an existing role persists the new name.
        obj = self.object_factory(department=self.department)
        data = {'name': 'ServerRoleName2'}
        response, obj_updated = self._test_edit(obj, data)
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
        self.assertEqual(obj_updated.name, 'ServerRoleName2')
class ModalApplicationForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
    """Access-denial tests for the 'application' modal form."""
    url_params = {'form_name': 'application'}
    object_factory = ApplicationFactory
class ModalApplicationTest(BaseModalTestCase, BaseModalTests):
    """CRUD tests for the 'application' modal, run as a department manager."""
    url_params = {'form_name': 'application'}
    object_factory = ApplicationFactory
    logged_is_manager = True

    @classmethod
    def getSetUpObjectData(cls):
        # Applications are created directly under the test department.
        return {'department': cls.department}

    def test_create(self):
        # Unlike the other modals, creating an application redirects to the
        # new application's page instead of asking for a reload.
        response, obj = self._test_create({'name': 'ApplicationName'})
        self.assertJSONEqual(response.content,
                             {"status": True,
                              "action": "redirect",
                              "target": reverse('application_page', kwargs={'application_id': obj.id})})

    def test_edit(self):
        obj = self.object_factory(department=self.department)
        data = {'name': 'ApplicationName2'}
        response, obj_updated = self._test_edit(obj, data)
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
        self.assertEqual(obj_updated.name, 'ApplicationName2')
class ModalEnvironmentForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
    """Access-denial tests for the 'environment' modal form."""
    url_params = {'form_name': 'environment', 'parent_name': 'application'}
    object_factory = EnvironmentFactory

    @classmethod
    def setUpClass(cls):
        super(ModalEnvironmentForbiddenTest, cls).setUpClass()
        # The environment modal requires an existing parent application.
        cls.application = ApplicationFactory(department=cls.department)
        cls.url_params['parent_id'] = cls.application.id
class ModalEnvironmentTest(BaseModalTestCase, BaseModalTests):
    """CRUD tests for the 'environment' modal (child of an application)."""
    url_params = {'form_name': 'environment', 'parent_name': 'application'}
    object_factory = EnvironmentFactory
    logged_is_manager = True
    application = None

    @classmethod
    def setUpClass(cls):
        # NOTE(review): super() is anchored at BaseModalTestCase, so
        # BaseModalTestCase.setUpClass itself is skipped and its object
        # creation is repeated below, after the parent application exists.
        # This looks deliberate -- confirm before "fixing" it.
        super(BaseModalTestCase, cls).setUpClass()
        cls.application = ApplicationFactory(department=cls.department)
        cls.url_params['parent_id'] = cls.application.id
        cls.object = cls.object_factory(**cls.getSetUpObjectData())

    @classmethod
    def getSetUpObjectData(cls):
        return {'application': cls.application}

    def test_create(self):
        response, obj = self._test_create({'name': 'EnvironmentName', 'application': self.application.id})
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})

    def test_edit(self):
        # Move an environment created under a throwaway application onto the
        # class-level application while renaming it.
        application = ApplicationFactory(department=self.department)
        obj = self.object_factory(application=application)
        data = {'name': 'EnvironmentName2', 'application': self.application.id}
        response, obj_updated = self._test_edit(obj, data)
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
        self.assertEqual(obj_updated.name, 'EnvironmentName2')
class ModalServerForbiddenTest(BaseModalTestCase, BaseForbiddenModalTests):
    """Access-denial tests for the 'server' modal form."""
    url_params = {'form_name': 'server', 'parent_name': 'environment'}
    object_factory = ServerFactory

    @classmethod
    def setUpClass(cls):
        super(ModalServerForbiddenTest, cls).setUpClass()
        # NOTE(review): parent_name is 'environment' but parent_id is wired
        # to an *application* id -- likely copy-paste from the application
        # test.  Harmless if access is denied before the parent lookup, but
        # confirm.
        cls.application = ApplicationFactory(department=cls.department)
        cls.url_params['parent_id'] = cls.application.id
class ModalServerTest(BaseModalTestCase, BaseModalTests):
    """CRUD tests for the 'server' modal (child of an environment)."""
    url_params = {'form_name': 'server', 'parent_name': 'environment'}
    object_factory = ServerFactory
    logged_is_manager = True
    environment = None

    @classmethod
    def setUpClass(cls):
        # NOTE(review): super() is anchored at BaseModalTestCase (its own
        # setUpClass is skipped) so the parent environment can be created
        # before the test object -- mirrors ModalEnvironmentTest; confirm.
        super(BaseModalTestCase, cls).setUpClass()
        cls.environment = EnvironmentFactory(application=ApplicationFactory(department=cls.department))
        cls.url_params['parent_id'] = cls.environment.id
        cls.object = cls.object_factory(**cls.getSetUpObjectData())

    @classmethod
    def getSetUpObjectData(cls):
        return {'environment': cls.environment}

    def test_create(self):
        # The server form requires at least one role from the department.
        server_role = ServerRole.objects.filter(department=self.department).first()
        response, obj = self._test_create({'name': 'ServerName',
                                           'environment': self.environment.id,
                                           'roles': server_role.id,
                                           'host': 'host',
                                           'port': 22,
                                           'user': 'user',
                                           'method': 1,
                                           })
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})

    def test_edit(self):
        # Edit a server created under a throwaway environment, reassigning
        # it to the class-level environment.
        environment = EnvironmentFactory(application=ApplicationFactory(department=self.department))
        obj = self.object_factory(environment=environment)
        server_role = ServerRole.objects.filter(department=self.department).first()
        data = {'name': 'ServerName2',
                'environment': self.environment.id,
                'roles': server_role.id,
                'host': 'host',
                'port': 22,
                'user': 'user',
                'method': 1,}
        response, obj_updated = self._test_edit(obj, data)
        self.assertJSONEqual(response.content, {"status": True, "action": "reload"})
        self.assertEqual(obj_updated.name, 'ServerName2')
|
axelrod/eigen.py
|
RomeroLaura/Axelrod
| 596 |
143260
|
"""
Compute the principal eigenvector of a matrix using power iteration.
See also numpy.linalg.eig which calculates all the eigenvalues and
eigenvectors.
"""
from typing import Tuple
import numpy as np
def _normalise(nvec: np.ndarray) -> np.ndarray:
"""Normalises the given numpy array."""
with np.errstate(invalid="ignore"):
result = nvec / np.sqrt((nvec @ nvec))
return result
def _squared_error(vector_1: np.ndarray, vector_2: np.ndarray) -> float:
"""Computes the squared error between two numpy arrays."""
diff = vector_1 - vector_2
s = diff @ diff
return np.sqrt(s)
def _power_iteration(mat: np.array, initial: np.ndarray) -> np.ndarray:
    """Generate successive normalised iterates of the power method.

    Params
    ------
    mat: numpy.array
        The matrix applied at every step.
    initial: numpy.array
        The starting vector.

    Yields
    ------
    The normalised product ``(mat ^ k) * initial`` after each step.  The
    generator never terminates; the caller decides when to stop.
    """
    state = initial
    while True:
        state = _normalise(np.dot(mat, state))
        yield state
def principal_eigenvector(
    mat: np.array, maximum_iterations=1000, max_error=1e-3
) -> Tuple[np.ndarray, float]:
    """
    Computes the (normalised) principal eigenvector of the given matrix.

    Params
    ------
    mat: numpy.array
        The matrix to use for multiplication iteration
    maximum_iterations: int, None
        The maximum number of iterations of the approximation
    max_error: float, 1e-3
        Exit criterion -- error threshold of the difference of successive steps
        (docstring previously claimed 1e-8, contradicting the actual default)

    Returns
    -------
    ndarray
        Eigenvector estimate for the input matrix
    float
        Eigenvalue corresponding to the returned eigenvector
    """
    mat_ = np.array(mat)
    size = mat_.shape[0]
    initial = np.ones(size)

    # Power iteration: stop on iteration budget or on convergence of
    # successive iterates.
    if not maximum_iterations:
        maximum_iterations = float("inf")
    last = initial
    # Fix: iterate over the validated ndarray `mat_` (the raw `mat` argument
    # was previously passed through, so list inputs were converted on every
    # multiplication).
    for i, vector in enumerate(_power_iteration(mat_, initial=initial)):
        if i > maximum_iterations:
            break
        if _squared_error(vector, last) < max_error:
            break
        last = vector

    # Compute the eigenvalue (Rayleigh quotient)
    eigenvalue = ((mat_ @ vector) @ vector) / (vector @ vector)
    # Liberate the eigenvalue from numpy
    eigenvalue = float(eigenvalue)
    return vector, eigenvalue
|
packages/pyright-internal/src/tests/samples/paramInference1.py
|
Jasha10/pyright
| 3,934 |
143288
|
# This sample tests the logic that infers parameter types based on
# default argument values or annotated base class methods.
class Parent:
    # Fully annotated method; Child overrides it without annotations and the
    # checker is expected to copy these parameter/return types (see the
    # sample's header comment).
    def func1(self, a: int, b: str) -> float:
        ...
class Child(Parent):
    # Deliberately unannotated: parameter types must be inferred from
    # Parent.func1.  Do not add annotations -- that would defeat the sample.
    def func1(self, a, b):
        reveal_type(self, expected_text="Self@Child")
        reveal_type(a, expected_text="int")
        reveal_type(b, expected_text="str")
        return a
def func2(a, b=0, c=None):
    # Parameter types inferred from default values: `b` from the int
    # literal, `c` as Unknown | None; `a` has no default, so it stays Unknown.
    reveal_type(a, expected_text="Unknown")
    reveal_type(b, expected_text="int")
    reveal_type(c, expected_text="Unknown | None")
def func3(a=(1, 2), b=[1,2], c={1: 2}):
    # Container defaults intentionally do not pin the parameter types.
    reveal_type(a, expected_text="Unknown")
    reveal_type(b, expected_text="Unknown")
    reveal_type(c, expected_text="Unknown")
|
aurora/nn/pooling.py
|
upul/Aurora
| 111 |
143290
|
from aurora.autodiff.autodiff import Op
from aurora.nn.pyx.fast_pooling import max_pool_forward, max_pool_backward
try:
from aurora.ndarray import gpu_op
except ImportError:
pass
class MaxPoolOp(Op):
    """Max-pooling operator node.

    Input layout is (batch, channels, height, width), as established by the
    index usage in infer_shape.  The forward pass runs either the
    fast_pooling kernel (CPU) or cuDNN (GPU); the backward pass is
    delegated to MaxPoolGradientOp.
    """

    def __call__(self, input, filter=(2, 2), strides=(2, 2)):
        """Create a max-pool node over `input`.

        :param input: the input node
        :param filter: pooling window as (height, width)
        :param strides: step sizes as (height, width)
        """
        new_node = Op.__call__(self)
        new_node.inputs = [input]
        new_node.filter = filter
        new_node.strides = strides
        # Forward output is cached here for the cuDNN backward pass.
        new_node.cache = {}
        new_node.name = 'MaxPoolOp({})'.format(input.name)
        return new_node

    def compute(self, node, input_vals, output_val, use_numpy=True):
        """Run the forward pass on CPU (fast_pooling) or GPU (cuDNN)."""
        assert len(input_vals) == 1
        filter_height = node.filter[0]
        filter_width = node.filter[1]
        stride_height = node.strides[0]
        stride_width = node.strides[1]
        if use_numpy:
            output_val[:] = max_pool_forward(input_vals[0],
                                             filter_height=filter_height,
                                             filter_width=filter_width,
                                             stride_height=stride_height,
                                             stride_width=stride_width)
        else:
            gpu_op.cudnn_pool_forward(input_vals[0],
                                      filter_height, filter_width,
                                      stride_height, stride_width,
                                      'max',
                                      output_val)
        node.cache['forward'] = output_val

    def gradient(self, node, output_grads):
        # Bug fix: forward this node's filter/strides to the gradient op.
        # Previously they were omitted, so any pooling configuration other
        # than the default (2, 2)/(2, 2) produced wrong gradients.
        return [maxPoolBack(node.inputs[0], output_grads,
                            filter=node.filter, strides=node.strides,
                            cache=node.cache)]

    def infer_shape(self, node, input_shapes):
        """Output spatial size: floor((in - filter) / stride) + 1 per axis."""
        assert len(input_shapes) == 1
        filter_height = node.filter[0]
        filter_width = node.filter[1]
        stride_height = node.strides[0]
        stride_width = node.strides[1]
        input_batch_size = input_shapes[0][0]
        input_n_channels = input_shapes[0][1]
        input_height = input_shapes[0][2]
        input_width = input_shapes[0][3]
        new_height = int((input_height - filter_height) / stride_height) + 1
        new_width = int((input_width - filter_width) / stride_width) + 1
        return input_batch_size, input_n_channels, new_height, new_width
class MaxPoolGradientOp(Op):
    """Backward pass of max pooling (companion of MaxPoolOp)."""

    def __call__(self, node_A, node_B, filter=(2, 2), strides=(2, 2), cache=None):
        """Create the gradient node.

        :param node_A: the original pooling input
        :param node_B: the gradient flowing in from downstream (output_grad)
        :param cache: forward-pass cache (read by the cuDNN backward call)
        """
        new_node = Op.__call__(self)
        # node_B is the output_grad
        new_node.inputs = [node_A, node_B]
        new_node.filter = filter
        new_node.strides = strides
        new_node.cache = cache
        new_node.name = 'MaxPoolGradientOp(%s)' % (node_A.name)
        return new_node

    def compute(self, node, input_vals, output_val, use_numpy=True):
        """Run the backward pass on CPU (fast_pooling) or GPU (cuDNN)."""
        assert len(input_vals) == 2
        filter_height = node.filter[0]
        filter_width = node.filter[1]
        stride_height = node.strides[0]
        stride_width = node.strides[1]
        data = input_vals[0]
        output_grad = input_vals[1]
        if use_numpy:
            output_val[:] = max_pool_backward(output_grad,
                                              data,
                                              filter_height=filter_height,
                                              filter_width=filter_width,
                                              stride_height=stride_height,
                                              stride_width=stride_width
                                              )
        else:
            gpu_op.cudnn_pool_backward(data, output_grad, node.cache['forward'],
                                       filter_height, filter_width,
                                       stride_height, stride_width,
                                       'max',
                                       output_val)

    def gradient(self, node, output_grads):
        # Second-order gradients are not supported.  (Bug fix: the error
        # message previously named 'AverageGradientOp'.)
        raise NotImplementedError('Gradient of MaxPoolGradientOp is not implemented')

    def infer_shape(self, node, input_shapes):
        """The gradient has the same shape as the original pooling input."""
        assert len(input_shapes) == 2
        return input_shapes[0]
# Global singleton operators
maxPool = MaxPoolOp()
maxPoolBack = MaxPoolGradientOp()
|
tensorflow_federated/python/core/backends/xla/execution_contexts.py
|
zhihansh/federated-oss
| 1,918 |
143326
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execution contexts for the XLA backend."""
from tensorflow_federated.python.core.backends.xla import compiler
from tensorflow_federated.python.core.backends.xla import executor
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.execution_contexts import sync_execution_context
from tensorflow_federated.python.core.impl.executors import executor_stacks
def create_local_python_execution_context():
  """Creates an XLA-based local execution context.

  NOTE: This context is only directly backed by an XLA executor. It does not
  support any intrinsics, lambda expressions, etc.

  Returns:
    An instance of `execution_context.ExecutionContext` backed by XLA executor.
  """
  # TODO(b/175888145): Extend this into a complete local executor stack.
  factory = executor_stacks.local_executor_factory(
      support_sequence_ops=True,
      # XLA executors sit at the leaves of the stack; leaf computations are
      # built by the XLA computation factory.
      leaf_executor_fn=executor.XlaExecutor,
      local_computation_factory=compiler.XlaComputationFactory())
  return sync_execution_context.ExecutionContext(executor_fn=factory)
def set_local_python_execution_context(*args, **kwargs):
  """Sets an XLA-based local execution context as the default.

  Invokes `create_local_python_execution_context` to construct an execution
  context, and sets it as the default. Accepts the same parameters as
  `create_local_python_execution_context` (currently none).

  Args:
    *args: Positional args for `create_local_python_execution_context`.
    **kwargs: Keyword args for `create_local_python_execution_context`.
  """
  context = create_local_python_execution_context(*args, **kwargs)
  context_stack_impl.context_stack.set_default_context(context)
|
tests/woa_test/whale_test.py
|
HaaLeo/ant-colony-optimization
| 221 |
143331
|
<filename>tests/woa_test/whale_test.py
# ------------------------------------------------------------------------------------------------------
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
import numpy as np
import pytest
from swarmlib.woa.whale import Whale
# pylint: disable=unused-variable
@pytest.fixture
def test_func():
    # Objective shared by all whales: the sum of the position coordinates.
    return lambda x: np.sum(x)  # pylint: disable=unnecessary-lambda
@pytest.fixture
def test_object(test_func):
    # Whale with a deterministic RNG (seed 3) so positions are reproducible.
    return Whale(
        function=test_func,
        bit_generator=np.random.default_rng(3),
        iteration_number=10)
def describe_whale():
    # pytest-describe style: nested functions group the test cases.
    def describe_constructor():
        def describe_raise_error():
            def if_iteration_number_is_missing(test_func):
                with pytest.raises(KeyError):
                    Whale(function=test_func, bit_generator=np.random.default_rng(3))
        def initializes_correctly(test_object):
            # Expected values are pinned by the seeded RNG (seed 3).
            np.testing.assert_array_equal(test_object.position, [0.34259666857449744, 0.9472420263843988])
            np.testing.assert_equal(test_object.value, 1.2898386949588962)
    def describe_step():
        # Each case rebuilds best/random/test whales with fixed seeds; the
        # injected `test_object` fixture is intentionally shadowed so every
        # case controls its own RNG state.
        def describe_attack_prey():
            def updates_position_correctly(test_object, test_func):
                best = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(7),
                    iteration_number=10)
                random = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(5),
                    iteration_number=10)
                test_object = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(3),  # rng with start 3 triggers attacking
                    iteration_number=10,
                    b=-1)
                test_object.step(best, random)
                np.testing.assert_array_equal(test_object.position, [0.4808914501845343, 1.494525117648736])
                np.testing.assert_equal(test_object.value, 1.9754165678332702)
        def describe_encircle_prey():
            def updates_position_correctly(test_object, test_func):
                best = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(6),
                    iteration_number=10)
                random = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(5),
                    iteration_number=10)
                test_object = Whale(
                    function=test_func,
                    # NOTE(review): seed 1 with a=0.1 should drive the
                    # *encircle* branch; the original comment said
                    # "triggers searching" -- confirm against Whale.step.
                    bit_generator=np.random.default_rng(1),
                    iteration_number=10,
                    a=0.1)
                test_object.step(best, random)
                np.testing.assert_array_equal(test_object.position, [1.8313140463093407, 1.5078584439028677])
                np.testing.assert_equal(test_object.value, 3.339172490212208)
        def describe_search_prey():
            def updates_position_correctly(test_object, test_func):
                best = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(6),
                    iteration_number=10)
                random = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(2),
                    iteration_number=10)
                test_object = Whale(
                    function=test_func,
                    bit_generator=np.random.default_rng(0),  # rng with start 0 triggers searching (in combination with a=1)
                    iteration_number=10,
                    lower_boundary=-1,
                    a=1)
                test_object.step(best, random)
                np.testing.assert_array_equal(test_object.position, [3.634068919497446, -0.4827071779367993])
                np.testing.assert_equal(test_object.value, 3.1513617415606463)
|
src/oci/ocvp/models/create_esxi_host_details.py
|
ezequielramos/oci-python-sdk
| 249 |
143338
|
<filename>src/oci/ocvp/models/create_esxi_host_details.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateEsxiHostDetails(object):
"""
Details of the ESXi host to add to the SDDC.
"""
#: A constant which can be used with the current_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "HOUR"
CURRENT_SKU_HOUR = "HOUR"
#: A constant which can be used with the current_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "MONTH"
CURRENT_SKU_MONTH = "MONTH"
#: A constant which can be used with the current_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "ONE_YEAR"
CURRENT_SKU_ONE_YEAR = "ONE_YEAR"
#: A constant which can be used with the current_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "THREE_YEARS"
CURRENT_SKU_THREE_YEARS = "THREE_YEARS"
#: A constant which can be used with the next_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "HOUR"
NEXT_SKU_HOUR = "HOUR"
#: A constant which can be used with the next_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "MONTH"
NEXT_SKU_MONTH = "MONTH"
#: A constant which can be used with the next_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "ONE_YEAR"
NEXT_SKU_ONE_YEAR = "ONE_YEAR"
#: A constant which can be used with the next_sku property of a CreateEsxiHostDetails.
#: This constant has a value of "THREE_YEARS"
NEXT_SKU_THREE_YEARS = "THREE_YEARS"
def __init__(self, **kwargs):
"""
Initializes a new CreateEsxiHostDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param sddc_id:
The value to assign to the sddc_id property of this CreateEsxiHostDetails.
:type sddc_id: str
:param display_name:
The value to assign to the display_name property of this CreateEsxiHostDetails.
:type display_name: str
:param current_sku:
The value to assign to the current_sku property of this CreateEsxiHostDetails.
Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"
:type current_sku: str
:param next_sku:
The value to assign to the next_sku property of this CreateEsxiHostDetails.
Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"
:type next_sku: str
:param compute_availability_domain:
The value to assign to the compute_availability_domain property of this CreateEsxiHostDetails.
:type compute_availability_domain: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateEsxiHostDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateEsxiHostDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'sddc_id': 'str',
'display_name': 'str',
'current_sku': 'str',
'next_sku': 'str',
'compute_availability_domain': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'sddc_id': 'sddcId',
'display_name': 'displayName',
'current_sku': 'currentSku',
'next_sku': 'nextSku',
'compute_availability_domain': 'computeAvailabilityDomain',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._sddc_id = None
self._display_name = None
self._current_sku = None
self._next_sku = None
self._compute_availability_domain = None
self._freeform_tags = None
self._defined_tags = None
@property
def sddc_id(self):
"""
**[Required]** Gets the sddc_id of this CreateEsxiHostDetails.
The `OCID`__ of the SDDC to add the
ESXi host to.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The sddc_id of this CreateEsxiHostDetails.
:rtype: str
"""
return self._sddc_id
@sddc_id.setter
def sddc_id(self, sddc_id):
"""
Sets the sddc_id of this CreateEsxiHostDetails.
The `OCID`__ of the SDDC to add the
ESXi host to.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param sddc_id: The sddc_id of this CreateEsxiHostDetails.
:type: str
"""
self._sddc_id = sddc_id
@property
def display_name(self):
"""
Gets the display_name of this CreateEsxiHostDetails.
A descriptive name for the ESXi host. It's changeable.
Esxi Host name requirements are 1-16 character length limit, Must start with a letter, Must be English letters, numbers, - only, No repeating hyphens, Must be unique within the SDDC.
If this attribute is not specified, the SDDC's `instanceDisplayNamePrefix` attribute is used
to name and incrementally number the ESXi host. For example, if you're creating the fourth
ESXi host in the SDDC, and `instanceDisplayNamePrefix` is `MySDDC`, the host's display
name is `MySDDC-4`.
Avoid entering confidential information.
:return: The display_name of this CreateEsxiHostDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateEsxiHostDetails.
A descriptive name for the ESXi host. It's changeable.
Esxi Host name requirements are 1-16 character length limit, Must start with a letter, Must be English letters, numbers, - only, No repeating hyphens, Must be unique within the SDDC.
If this attribute is not specified, the SDDC's `instanceDisplayNamePrefix` attribute is used
to name and incrementally number the ESXi host. For example, if you're creating the fourth
ESXi host in the SDDC, and `instanceDisplayNamePrefix` is `MySDDC`, the host's display
name is `MySDDC-4`.
Avoid entering confidential information.
:param display_name: The display_name of this CreateEsxiHostDetails.
:type: str
"""
self._display_name = display_name
@property
def current_sku(self):
"""
Gets the current_sku of this CreateEsxiHostDetails.
The billing option currently used by the ESXi host.
:func:`list_supported_skus`.
Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"
:return: The current_sku of this CreateEsxiHostDetails.
:rtype: str
"""
return self._current_sku
@current_sku.setter
def current_sku(self, current_sku):
"""
Sets the current_sku of this CreateEsxiHostDetails.
The billing option currently used by the ESXi host.
:func:`list_supported_skus`.
:param current_sku: The current_sku of this CreateEsxiHostDetails.
:type: str
"""
allowed_values = ["HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"]
if not value_allowed_none_or_none_sentinel(current_sku, allowed_values):
raise ValueError(
"Invalid value for `current_sku`, must be None or one of {0}"
.format(allowed_values)
)
self._current_sku = current_sku
@property
def next_sku(self):
"""
Gets the next_sku of this CreateEsxiHostDetails.
The billing option to switch to after the existing billing cycle ends.
If `nextSku` is null or empty, `currentSku` continues to the next billing cycle.
:func:`list_supported_skus`.
Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"
:return: The next_sku of this CreateEsxiHostDetails.
:rtype: str
"""
return self._next_sku
@next_sku.setter
def next_sku(self, next_sku):
"""
Sets the next_sku of this CreateEsxiHostDetails.
The billing option to switch to after the existing billing cycle ends.
If `nextSku` is null or empty, `currentSku` continues to the next billing cycle.
:func:`list_supported_skus`.
:param next_sku: The next_sku of this CreateEsxiHostDetails.
:type: str
"""
allowed_values = ["HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"]
if not value_allowed_none_or_none_sentinel(next_sku, allowed_values):
raise ValueError(
"Invalid value for `next_sku`, must be None or one of {0}"
.format(allowed_values)
)
self._next_sku = next_sku
@property
def compute_availability_domain(self):
"""
Gets the compute_availability_domain of this CreateEsxiHostDetails.
The availability domain to create the ESXi host in.
If keep empty, for AD-specific SDDC, new ESXi host will be created in the same availability domain;
for multi-AD SDDC, new ESXi host will be auto assigned to the next availability domain following evenly distribution strategy.
:return: The compute_availability_domain of this CreateEsxiHostDetails.
:rtype: str
"""
return self._compute_availability_domain
@compute_availability_domain.setter
def compute_availability_domain(self, compute_availability_domain):
"""
Sets the compute_availability_domain of this CreateEsxiHostDetails.
The availability domain to create the ESXi host in.
If keep empty, for AD-specific SDDC, new ESXi host will be created in the same availability domain;
for multi-AD SDDC, new ESXi host will be auto assigned to the next availability domain following evenly distribution strategy.
:param compute_availability_domain: The compute_availability_domain of this CreateEsxiHostDetails.
:type: str
"""
self._compute_availability_domain = compute_availability_domain
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateEsxiHostDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateEsxiHostDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateEsxiHostDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateEsxiHostDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateEsxiHostDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateEsxiHostDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateEsxiHostDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateEsxiHostDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Explicit for Python 2 compatibility; Python 3 derives this from __eq__.
        return not self == other
|
atcoder/arc098/c.py
|
Ashindustry007/competitive-programming
| 506 |
143345
|
#!/usr/bin/env python3
# https://arc098.contest.atcoder.jp/tasks/arc098_a


def min_turns(s):
    """Return the minimum number of people who must turn around.

    Each character of ``s`` is 'E' (facing east) or 'W' (facing west).
    One person is chosen as leader; everyone west of the leader must face
    'E' and everyone east must face 'W'.  The leader never turns.

    For leader position i the cost is
    (count of 'W' in s[0..i]) + (count of 'E' in s[i..n-1]) - 1,
    where the -1 compensates for the leader's own letter being counted once.

    Previously the logic ran only at module import (untestable); it is now a
    reusable function with the original stdin protocol kept under a guard.
    """
    n = len(s)
    # w_prefix[i]: number of 'W' in the inclusive prefix s[0..i].
    w_prefix = [0] * n
    w_prefix[0] = 1 if s[0] == 'W' else 0
    for i in range(1, n):
        w_prefix[i] = w_prefix[i - 1] + (1 if s[i] == 'W' else 0)
    # e_suffix[i]: number of 'E' in the inclusive suffix s[i..n-1].
    e_suffix = [0] * n
    e_suffix[n - 1] = 1 if s[n - 1] == 'E' else 0
    for i in range(n - 2, -1, -1):
        e_suffix[i] = e_suffix[i + 1] + (1 if s[i] == 'E' else 0)
    return min(w_prefix[i] + e_suffix[i] for i in range(n)) - 1


if __name__ == '__main__':
    input()  # first line is n; len(s) makes it redundant
    print(min_turns(input()))
|
official/vision/beta/modeling/decoders/aspp_test.py
|
NasTul/models
| 82,518 |
143348
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for aspp."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.modeling.backbones import resnet
from official.vision.beta.modeling.decoders import aspp
class ASPPTest(parameterized.TestCase, tf.test.TestCase):
  """Tests construction and (de)serialization of the ASPP decoder."""

  @parameterized.parameters(
      (3, [6, 12, 18, 24], 128),
      (3, [6, 12, 18], 128),
      (3, [6, 12], 256),
      (4, [6, 12, 18, 24], 128),
      (4, [6, 12, 18], 128),
      (4, [6, 12], 256),
  )
  def test_network_creation(self, level, dilation_rates, num_filters):
    """Test creation of ASPP."""
    input_size = 256
    tf.keras.backend.set_image_data_format('channels_last')
    inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
    backbone = resnet.ResNet(model_id=50)
    network = aspp.ASPP(
        level=level,
        dilation_rates=dilation_rates,
        num_filters=num_filters)
    endpoints = backbone(inputs)
    feats = network(endpoints)
    # The decoder must expose exactly the requested level, spatially
    # downsampled by 2**level and with num_filters channels.
    self.assertIn(str(level), feats)
    self.assertAllEqual(
        [1, input_size // 2**level, input_size // 2**level, num_filters],
        feats[str(level)].shape.as_list())

  def test_serialize_deserialize(self):
    # Create a network object that sets all of its config options.
    kwargs = dict(
        level=3,
        dilation_rates=[6, 12],
        num_filters=256,
        pool_kernel_size=None,
        use_sync_bn=False,
        norm_momentum=0.99,
        norm_epsilon=0.001,
        activation='relu',
        kernel_initializer='VarianceScaling',
        kernel_regularizer=None,
        interpolation='bilinear',
        dropout_rate=0.2,
        # BUG FIX: this was the string 'false', which is truthy and silently
        # enabled depthwise convolutions; the boolean False is what the
        # ASPP config expects.
        use_depthwise_convolution=False,
    )
    network = aspp.ASPP(**kwargs)
    expected_config = dict(kwargs)
    self.assertEqual(network.get_config(), expected_config)
    # Create another network object from the first object's config.
    new_network = aspp.ASPP.from_config(network.get_config())
    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())
# Standard TensorFlow test entry point: discovers and runs all tests above.
if __name__ == '__main__':
  tf.test.main()
|
tests/providers/kubernetes/k8s_mocks.py
|
zkck/cloud-forensics-utils
| 241 |
143361
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kubernetes mock response objects, used for testing."""
from typing import Dict, Optional
from unittest import mock
from kubernetes import client
# Shared stand-in for a Kubernetes ApiClient; individual tests configure it.
MOCK_API_CLIENT = mock.Mock()
# Type alias for Kubernetes label mappings (label name -> value).
Labels = Dict[str, str]
# pylint: disable=line-too-long
def V1ObjectMeta(
    name: Optional[str] = None,
    namespace: Optional[str] = None,
    labels: Optional[Labels] = None) -> client.V1ObjectMeta:
  """Make Kubernetes API response metadata, see V1ObjectMeta.

  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1ObjectMeta.md
  """
  return client.V1ObjectMeta(name=name, namespace=namespace, labels=labels)
def V1NodeList(amount: int) -> client.V1NodeList:
  """Make Kubernetes API Node list response, see V1NodeList.

  Nodes are named ``node-0`` .. ``node-{amount-1}``.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1NodeList.md
  """
  items = [V1Node('node-{0:d}'.format(i)) for i in range(amount)]
  return client.V1NodeList(items=items)
def V1PodList(amount: int) -> client.V1PodList:
  """Make Kubernetes API Pod list response, see V1PodList.

  Pods are named ``pod-0`` .. ``pod-{amount-1}``.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodList.md
  """
  items = [V1Pod(name='pod-{0:d}'.format(i)) for i in range(amount)]
  return client.V1PodList(items=items)
def V1NetworkPolicyList(
    amount: int, namespace: str) -> client.V1NetworkPolicyList:
  """Make Kubernetes API NetworkPolicy list, see V1NetworkPolicyList.

  Policies are named ``netpol-0`` .. ``netpol-{amount-1}`` in ``namespace``.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1NetworkPolicyList.md
  """
  items = [
      V1NetworkPolicy('netpol-{0:d}'.format(i), namespace)
      for i in range(amount)
  ]
  return client.V1NetworkPolicyList(items=items)
def V1NetworkPolicy(name: str, namespace: str) -> client.V1NetworkPolicy:
  """Make Kubernetes API NetworkPolicy response, see V1NetworkPolicy.

  Only the metadata (name/namespace) is populated; no spec.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1NetworkPolicy.md
  """
  return client.V1NetworkPolicy(
      metadata=V1ObjectMeta(name=name, namespace=namespace))
def V1Service(selector_labels: Labels) -> client.V1Service:
  """Make Kubernetes API service response, see V1Service.

  Only the spec's pod selector is populated.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Service.md
  """
  return client.V1Service(spec=client.V1ServiceSpec(selector=selector_labels))
def V1Node(name: str) -> client.V1Node:
  """Make Kubernetes API Node response, see V1Node.

  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Node.md
  """
  return client.V1Node(metadata=V1ObjectMeta(name=name))
def V1Pod(
    name: Optional[str] = None,
    namespace: Optional[str] = None,
    node_name: Optional[str] = None,
    labels: Optional[Labels] = None) -> client.V1Pod:
  """Make Kubernetes API Pod response, see V1Pod.

  The pod spec carries the node assignment and an empty container list.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md
  """
  return client.V1Pod(
      metadata=V1ObjectMeta(name=name, namespace=namespace, labels=labels),
      spec=client.V1PodSpec(node_name=node_name, containers=[]))
def V1PodTemplateSpec(labels: Labels) -> client.V1PodTemplateSpec:
  """Make Kubernetes API template spec response, see V1PodTemplateSpec.

  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodTemplateSpec.md
  """
  return client.V1PodTemplateSpec(metadata=V1ObjectMeta(labels=labels))
def V1ReplicaSet(
    name: Optional[str] = None,
    namespace: Optional[str] = None,
    template_spec_labels: Optional[Labels] = None) -> client.V1ReplicaSet:
  """Make Kubernetes API ReplicaSet response, V1ReplicaSet.

  The pod template carries ``template_spec_labels`` (empty dict if None);
  the label selector is left unconstrained.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1ReplicaSet.md
  """
  return client.V1ReplicaSet(
      metadata=V1ObjectMeta(name=name, namespace=namespace),
      spec=client.V1ReplicaSetSpec(
          selector=client.V1LabelSelector(),
          template=V1PodTemplateSpec(template_spec_labels or {})))
def V1Deployment(
    name: Optional[str] = None,
    namespace: Optional[str] = None,
    template_spec_labels: Optional[Labels] = None,
    match_labels: Optional[Labels] = None) -> client.V1Deployment:
  """Make Kubernetes API response deployment, see V1Deployment.

  Unlike V1ReplicaSet above, the selector's ``match_labels`` is settable.
  https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Deployment.md
  """
  return client.V1Deployment(
      metadata=V1ObjectMeta(name=name, namespace=namespace),
      spec=client.V1DeploymentSpec(
          selector=client.V1LabelSelector(match_labels=match_labels),
          template=V1PodTemplateSpec(template_spec_labels or {})))
|
src/converters/pipeline_tester.py
|
u0251077/InsightFace-REST
| 236 |
143362
|
<filename>src/converters/pipeline_tester.py<gh_stars>100-1000
from modules.face_model import FaceAnalysis
import logging
import cv2
import time
# Console logging so the per-iteration timings below are visible.
logging.basicConfig(
    level='DEBUG',
    format='%(asctime)s %(levelname)s - %(message)s',
    datefmt='[%H:%M:%S]',
)
# TensorRT-backed detection/recognition pipeline under test.
model = FaceAnalysis(max_size=[640, 640], backend_name='trt', det_name='retinaface_mnet025_v1', max_rec_batch_size=64)
# Warmup
image_path = 'test_images/Stallone.jpg'
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
faces = model.get(image)
#Test
# Time `iters` full detection+recognition passes on a fixed image.
iters = 50
t0 = time.time()
image_path = 'test_images/lumia.jpg'
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
for i in range(iters):
    tf0 = time.time()
    faces = model.get(image)
    tf1 = time.time()
    logging.debug(f"Full detection took: {tf1 - tf0}")
t1 = time.time()
#print(faces)
# Aggregate throughput over all iterations.
print(f'Took {t1 - t0} s. ({iters / (t1 - t0)} im/sec)')
|
tests/validation/response/test_response_content_type_validation.py
|
maroux/flex
| 160 |
143376
|
import pytest
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.validation.response import (
validate_response,
)
from tests.factories import (
SchemaFactory,
ResponseFactory,
)
from tests.utils import assert_message_in_errors
def test_response_content_type_validation():
    # A response whose content type exactly matches a declared `produces`
    # entry must validate without raising.
    schema = SchemaFactory(
        produces=['application/json'],
        paths={
            '/get': {
                'get': {
                    'responses': {'200': {'description': 'Success'}},
                }
            },
        },
    )
    response = ResponseFactory(
        url='http://www.example.com/get',
        content_type='application/json',
    )
    validate_response(
        response=response,
        request_method='get',
        schema=schema,
    )
def test_response_content_type_validation_when_no_content_type_specified():
    # A response that declares no content type at all is accepted.
    schema = SchemaFactory(
        produces=['application/json'],
        paths={
            '/get': {
                'get': {
                    'responses': {'200': {'description': 'Success'}},
                }
            },
        },
    )
    response = ResponseFactory(
        url='http://www.example.com/get',
        content_type=None,
    )
    # this is considered valid currently, but may change
    validate_response(
        response=response,
        request_method='get',
        schema=schema,
    )
def test_response_content_type_validation_ignores_parameters():
    # Media-type parameters such as `charset` must not affect the match
    # against `produces`.
    schema = SchemaFactory(
        produces=['application/json'],
        paths={
            '/get': {
                'get': {
                    'responses': {'200': {'description': 'Success'}},
                }
            },
        },
    )
    response = ResponseFactory(
        url='http://www.example.com/get',
        content_type='application/json; charset=UTF-8',
    )
    validate_response(
        response=response,
        request_method='get',
        schema=schema,
    )
|
teether/util/frontierset.py
|
s-mallow/teether-erc20
| 102 |
143400
|
from collections import defaultdict
class FrontierSet(object):
    """
    A set that also maintains a partial topological ordering.

    ``add(a, b)`` records that ``a`` is a necessary prerequisite of ``b``;
    an item is *blocked* while any of its prerequisites is still present.
    The current set of non-blocked items can be obtained as ``.frontier``.
    """

    def __init__(self, data=None):
        """
        :param data: optional iterable of items added with no prerequisites
        """
        # _inhibiting_set[b]: prerequisites of b that are still present.
        self._inhibiting_set = defaultdict(set)
        # _blocking_set[a]: items that a is a prerequisite of.
        self._blocking_set = defaultdict(set)
        # Active (prerequisite, dependent) pairs.
        self._edges = set()
        # Items with no outstanding prerequisites.
        self._frontier = set()
        # Memoized frozen views; None means "stale, recompute on access".
        self._frozenedges = None
        self._frozenfrontier = None
        self._frozenall = None
        if data:
            for d in data:
                self.add(d)

    def _invalidate(self):
        """Drop the memoized frozen views after any mutation."""
        self._frozenedges = None
        self._frozenfrontier = None
        self._frozenall = None

    @property
    def edges(self):
        """frozenset of all active (prerequisite, dependent) pairs."""
        if self._frozenedges is None:
            self._frozenedges = frozenset(self._edges)
        return self._frozenedges

    @property
    def frontier(self):
        """frozenset of the currently unblocked items."""
        if self._frozenfrontier is None:
            self._frozenfrontier = frozenset(self._frontier)
        return self._frozenfrontier

    @property
    def all(self):
        """frozenset of every item, blocked or not."""
        if self._frozenall is None:
            self._frozenall = frozenset(
                set(self._blocking_set.keys()) | set(self._inhibiting_set.keys()) | self._frontier)
        return self._frozenall

    def add(self, a, b=None):
        """
        Add a to the set.
        If b is given, require that a is a necessary prerequisite for b
        :param a:
        :param b:
        :return:
        """
        self._invalidate()
        if b:
            self._edges.add((a, b))
            self._inhibiting_set[b].add(a)
            self._blocking_set[a].add(b)
            # a joins the frontier only if nothing inhibits it; b leaves it.
            if not self._inhibiting_set[a]:
                self._frontier.add(a)
            self._frontier.discard(b)
        else:
            self._frontier.add(a)

    def remove(self, a):
        """Remove a, unblocking dependents whose last prerequisite it was."""
        self._invalidate()
        for b in self._blocking_set[a]:
            # BUG FIX: edges are stored as (prerequisite, dependent) = (a, b);
            # the discard previously used the reversed tuple (b, a), leaving
            # stale edges behind and corrupting .edges / __eq__ / issubset.
            self._edges.discard((a, b))
            self._inhibiting_set[b].discard(a)
            if not self._inhibiting_set[b]:
                self._frontier.add(b)
        for c in self._inhibiting_set[a]:
            # Same orientation fix: c is a prerequisite of a, so the edge
            # is (c, a), not (a, c).
            self._edges.discard((c, a))
            self._blocking_set[c].discard(a)
        del self._blocking_set[a]
        del self._inhibiting_set[a]
        self._frontier.discard(a)

    def copy(self):
        """Return an independent copy; mutating it leaves self untouched."""
        new = FrontierSet()
        # BUG FIX: dict.copy() is shallow, so both instances previously
        # shared the per-key sets and mutating the copy corrupted the
        # original.  Copy each value set as well.
        new._inhibiting_set = defaultdict(set, {k: set(v) for k, v in self._inhibiting_set.items()})
        new._blocking_set = defaultdict(set, {k: set(v) for k, v in self._blocking_set.items()})
        new._edges = self._edges.copy()
        new._frontier = self._frontier.copy()
        new._invalidate()
        return new

    def issubset(self, other):
        """True when both items and edges are contained in other's."""
        return self.all.issubset(other.all) and self.edges.issubset(other.edges)

    def __len__(self):
        return len(self.all)

    def __eq__(self, other):
        return self.edges == other.edges and self.all == other.all

    def __hash__(self):
        # NOTE: the hash reflects *current* contents; do not mutate an
        # instance that is used as a dict key or set member.
        return 3 * hash(self.edges) + 7 * hash(self.all)

    def __iter__(self):
        return iter(self.all)

    def __repr__(self):
        return '{%s|%s}' % (
            ','.join('%x' % i for i in self.frontier), ','.join('%x' % i for i in self.all - self.frontier))
|
KG/DuEL_Baseline/data/eval.py
|
pkulzb/Research
| 1,319 |
143406
|
<reponame>pkulzb/Research
# !/bin/env python
# -*- coding: utf-8 -*-
#####################################################################################
#
# Copyright (c) CCKS 2020 Entity Linking Organizing Committee.
# All Rights Reserved.
#
#####################################################################################
"""
@version 2020-03-30
@brief:
Entity Linking效果评估脚本,评价指标Micro-F1
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import json
from collections import defaultdict
class Eval(object):
    """
    Entity Linking Evaluation (Micro-F1).

    NOTE: this script is Python 2 code (``unicode`` checks, print statements
    in the caller); run it under Python 2.
    """
    def __init__(self, golden_file_path, user_file_path):
        self.golden_file_path = golden_file_path  # gold-standard annotations (JSON lines)
        self.user_file_path = user_file_path      # user predictions (JSON lines)
        self.tp = 0                               # true positives
        self.fp = 0                               # false positives
        self.total_recall = 0                     # number of golden mentions
        self.errno = None                         # last format-check error code, or None
    def format_check(self, file_path):
        """
        Validate the JSON-lines file format.
        :param file_path: path of the file to check
        :return: bool, True when every line passes the check, False otherwise
                 (self.errno records which rule failed: 1..13)
        """
        # NOTE(review): after a failure the outer loop keeps scanning the
        # remaining lines, so self.errno reflects the *last* failure found;
        # flag stays False either way.  Error code 7 is reused for two
        # different rules.  Confirm against the official CCKS script.
        flag = True
        for line in open(file_path):
            json_info = json.loads(line.strip())
            if 'text_id' not in json_info:
                flag = False
                self.errno = 1
                break
            if 'text' not in json_info:
                flag = False
                self.errno = 2
                break
            if 'mention_data' not in json_info:
                flag = False
                self.errno = 3
                break
            if not isinstance(json_info['text_id'], unicode):
                flag = False
                self.errno = 4
                break
            if not json_info['text_id'].isdigit():
                flag = False
                self.errno = 5
                break
            if not isinstance(json_info['text'], unicode):
                flag = False
                self.errno = 6
                break
            if not isinstance(json_info['mention_data'], list):
                flag = False
                self.errno = 7
                break
            for mention_info in json_info['mention_data']:
                if 'kb_id' not in mention_info:
                    flag = False
                    self.errno = 7
                    break
                if 'mention' not in mention_info:
                    flag = False
                    self.errno = 8
                    break
                if 'offset' not in mention_info:
                    flag = False
                    self.errno = 9
                    break
                if not isinstance(mention_info['kb_id'], unicode):
                    flag = False
                    self.errno = 10
                    break
                if not isinstance(mention_info['mention'], unicode):
                    flag = False
                    self.errno = 11
                    break
                if not isinstance(mention_info['offset'], unicode):
                    flag = False
                    self.errno = 12
                    break
                if not mention_info['offset'].isdigit():
                    flag = False
                    self.errno = 13
                    break
        return flag
    def micro_f1(self):
        """
        :return: floats (precision, recall, Micro-F1); (None, None, None)
                 when either file fails the format check
        """
        # Validate both files before scoring.
        flag_golden = self.format_check(self.golden_file_path)
        flag_user = self.format_check(self.user_file_path)
        # Bail out with Nones if either file is malformed.
        if not flag_golden or not flag_user:
            return None, None, None
        precision = 0
        recall = 0
        self.tp = 0
        self.fp = 0
        self.total_recall = 0
        golden_dict = defaultdict(list)
        # Index golden mentions by (text_id, text, mention, offset).
        for line in open(self.golden_file_path):
            golden_info = json.loads(line.strip())
            text_id = golden_info['text_id']
            text = golden_info['text']
            mention_data = golden_info['mention_data']
            for mention_info in mention_data:
                kb_id = mention_info['kb_id']
                mention = mention_info['mention']
                offset = mention_info['offset']
                key = '\1'.join(
                    [text_id, text, mention, offset],
                ).encode('utf8')
                # Second element is an "already counted" flag (0 = not yet).
                golden_dict[key] = [kb_id, 0]
                self.total_recall += 1
        # Score the user predictions against the golden index.
        for line in open(self.user_file_path):
            golden_info = json.loads(line.strip())
            text_id = golden_info['text_id']
            text = golden_info['text']
            mention_data = golden_info['mention_data']
            for mention_info in mention_data:
                kb_id = mention_info['kb_id']
                mention = mention_info['mention']
                offset = mention_info['offset']
                key = '\1'.join(
                    [text_id, text, mention, offset],
                ).encode('utf8')
                if key in golden_dict:
                    kb_result_golden = golden_dict[key]
                    if kb_id.isdigit():
                        if kb_id in [kb_result_golden[0]] and kb_result_golden[1] in [0]:
                            self.tp += 1
                        else:
                            self.fp += 1
                    else:
                        continue
                    # NIL golden result: kb_id candidates joined with '|'.
                    # NOTE(review): numeric kb_ids fall through to this NIL
                    # check as well and are counted a second time, while
                    # non-numeric (NIL) predictions are skipped by the
                    # `continue` above.  Verify against the original script.
                    nil_res = kb_result_golden[0].split('|')
                    if kb_id in nil_res and kb_result_golden[1] in [0]:
                        self.tp += 1
                    else:
                        self.fp += 1
                    golden_dict[key][1] = 1
                else:
                    self.fp += 1
        if self.tp + self.fp > 0:
            precision = float(self.tp) / (self.tp + self.fp)
        if self.total_recall > 0:
            recall = float(self.tp) / self.total_recall
        a = 2 * precision * recall
        b = precision + recall
        if b == 0:
            return 0, 0, 0
        f1 = a / b
        return precision, recall, f1
if __name__ == '__main__':
    # Files are UTF-8 JSON lines: golden annotations and user predictions.
    eval = Eval('./basic_data/test_result.json', './generated/test_pred.json')
    prec, recall, f1 = eval.micro_f1()
    print prec, recall, f1
    # A non-None errno means the format check failed; see format_check codes.
    if eval.errno:
        print eval.errno
|
datasets/MOT/seed/Impl/MOT20.py
|
zhangzhengde0225/SwinTrack
| 143 |
143429
|
from datasets.MOT.constructor.base_interface import MultipleObjectTrackingDatasetConstructor
def get_mot_class_definition():
    """Return the MOT class-id -> label mapping (ids 1..13)."""
    labels = (
        'Pedestrian',
        'Person on vehicle',
        'Car',
        'Bicycle',
        'Motorbike',
        'Non motorized vehicle',
        'Static person',
        'Distractor',
        'Occluder',
        'Occluder on the ground',
        'Occluder full',
        'Reflection',
        '(Unknown)',
    )
    return {class_id: label for class_id, label in enumerate(labels, start=1)}
def get_mot20_sequences_from_path(sequences):
    """Deduplicate MOT20 sequence names, keeping the first per suffix.

    Every name must have the form 'MOT20-<suffix>'; the first occurrence
    of each suffix wins and insertion order is preserved.
    """
    first_by_suffix = {}
    for sequence in sequences:
        parts = sequence.split('-')
        assert len(parts) == 2
        assert parts[0] == 'MOT20'
        # setdefault keeps the first sequence seen for this suffix.
        first_by_suffix.setdefault(parts[1], sequence)
    return first_by_suffix.values()
def construct_MOT20(constructor: MultipleObjectTrackingDatasetConstructor, seed):
    # MOT20 shares MOT17's directory layout, so reuse its constructor with
    # MOT20-specific sequence filtering and class labels.  Imported lazily
    # to avoid a circular import between the two seed modules.
    from .MOT17 import construct_MOT
    construct_MOT(constructor, seed, get_mot20_sequences_from_path, get_mot_class_definition())
|
lineflow/datasets/cnn_dailymail.py
|
yasufumy/textflow
| 138 |
143453
|
import io
import os
import pickle
import tarfile
from functools import lru_cache
from typing import Dict, Tuple
import arrayfiles
import gdown
from lineflow import download
from lineflow.core import ZipDataset
def get_cnn_dailymail() -> Dict[str, Tuple[arrayfiles.TextFile]]:
    """Download, extract and cache the CNN/DailyMail summarization dataset.

    Returns a dict mapping 'train'/'dev'/'test' to a (source, target)
    pair of TextFile readers.  The prepared dataset is pickled next to the
    raw files so subsequent calls skip the download/extract step.
    """
    url = 'https://s3.amazonaws.com/opennmt-models/Summary/cnndm.tar.gz'
    root = download.get_cache_directory(os.path.join('datasets', 'cnn_dailymail'))
    def creator(path):
        # First run: fetch the archive, unpack it, build the readers and
        # pickle them to `path`.
        archive_path = gdown.cached_download(url)
        target_path = os.path.join(root, 'raw')
        with tarfile.open(archive_path, 'r') as archive:
            print(f'Extracting to {target_path}')
            # NOTE(review): extractall on a downloaded archive trusts its
            # member paths (no traversal filtering) — confirm the source.
            archive.extractall(target_path)
        dataset = {}
        # The archive names the dev split "val"; remap it to "dev".
        for split in ('train', 'dev', 'test'):
            src_path = f'{split if split != "dev" else "val"}.txt.src'
            tgt_path = f'{split if split != "dev" else "val"}.txt.tgt.tagged'
            dataset[split] = (
                arrayfiles.TextFile(os.path.join(target_path, src_path)),
                arrayfiles.TextFile(os.path.join(target_path, tgt_path))
            )
        with io.open(path, 'wb') as f:
            pickle.dump(dataset, f)
        return dataset
    def loader(path):
        # Subsequent runs: load the previously pickled readers.
        with io.open(path, 'rb') as f:
            return pickle.load(f)
    pkl_path = os.path.join(root, 'cnndm.pkl')
    return download.cache_or_load_file(pkl_path, creator, loader)
# Memoized accessor so repeated dataset constructions share one load.
cached_get_cnn_dailymail = lru_cache()(get_cnn_dailymail)
class CnnDailymail(ZipDataset):
    """CNN/DailyMail dataset exposed as (source, target) pairs."""

    def __init__(self, split: str = 'train') -> None:
        valid_splits = {'train', 'dev', 'test'}
        if split not in valid_splits:
            raise ValueError(f"only 'train', 'dev' and 'test' are valid for 'split', but '{split}' is given.")
        # Loaded lazily on first instantiation and memoized across instances.
        raw = cached_get_cnn_dailymail()
        super(CnnDailymail, self).__init__(*raw[split])
|
nni/algorithms/compression/pytorch/quantization/__init__.py
|
dutxubo/nni
| 9,680 |
143459
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Re-export the built-in quantizer implementations at package level.
from .bnn_quantizer import BNNQuantizer
from .dorefa_quantizer import DoReFaQuantizer
from .lsq_quantizer import LsqQuantizer
from .native_quantizer import NaiveQuantizer
from .observer_quantizer import ObserverQuantizer
from .qat_quantizer import QAT_Quantizer
# Explicit public API of this package.
__all__ = ['NaiveQuantizer', 'QAT_Quantizer', 'DoReFaQuantizer', 'BNNQuantizer', 'LsqQuantizer', 'ObserverQuantizer']
|
ch06-Drawing function/draw_circle.py
|
Anancha/OpenCV-Python-Tutorial
| 2,875 |
143464
|
<reponame>Anancha/OpenCV-Python-Tutorial
# -*- coding: utf-8 -*-
# @Time : 2017/7/17 下午12:03
# @Author : play4fun
# @File : 画圆圈.py
# @Software: PyCharm
"""
画圆圈.py:随机覆盖,不同颜色,
"""
from time import sleep
import cv2
import numpy as np
def click_event(event, x, y, flags, param):
    '''
    Print the coordinates of a left mouse-button click on the canvas.
    :param event: OpenCV mouse event type
    :param x: x coordinate of the event
    :param y: y coordinate of the event
    :param flags: event flag bitmask
    :param param: user data supplied by setMouseCallback (unused)
    :return: None
    '''
    if event == cv2.EVENT_LBUTTONDOWN:
        print(x, y, flags, param)
cv2.namedWindow('Canvas', cv2.WINDOW_GUI_EXPANDED)
cv2.setMouseCallback("Canvas", click_event)
# 300x300 black BGR canvas; circles accumulate on it across iterations.
canvas = np.zeros((300, 300, 3), dtype="uint8")
while True:
    try:
        # Draw 25 randomly placed, randomly colored, filled circles per pass.
        for i in range(0, 25):
            radius = np.random.randint(5, high=200)
            color = np.random.randint(0, high=256, size=(3,)).tolist()
            pt = np.random.randint(0, high=300, size=(2,))
            cv2.circle(canvas, tuple(pt), radius, color, -1)
        cv2.imshow("Canvas", canvas)
        key = cv2.waitKey(1000)  # wait up to 1 second for a key press
        if key == ord('q'):
            break
        else:
            # sleep(1)
            continue
    except KeyboardInterrupt as e:
        # NOTE(review): Ctrl-C is swallowed and the loop keeps running; only
        # 'q' exits.  Confirm whether a `break` was intended here.
        print('KeyboardInterrupt', e)
    finally:
        # Runs on every iteration: the snapshot is rewritten once per pass.
        cv2.imwrite('random-circles2.jpg', canvas)
|
scripts/utils/utils.py
|
pazamelin/openvino
| 2,406 |
143468
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
import tarfile
from datetime import datetime
from shutil import copyfile, copytree, rmtree
# Tool-package version components: <major>.<minor>.<build>.<git-hash>
# (assembled in Automation.add_version_txt).
major_version = 0
minor_version = 3
class Automation:
    """Helpers for assembling, versioning and archiving the tools package."""

    @staticmethod
    def parse_bom(bom_path):
        """Read a bill-of-materials file.

        :param bom_path: path to a BOM file with one entry per line
        :return: list of raw lines (trailing newlines preserved; callers strip)
        """
        # `with` closes the handle even on error; the original leaked it.
        with open(bom_path) as bom_file:
            return list(bom_file)

    @staticmethod
    def copy_files_from_bom(root_path, bom):
        """Copy every BOM entry from root_path into a fresh tools_package dir.

        :param root_path: source tree that the BOM paths are relative to
        :param bom: iterable of relative paths (possibly newline-terminated)
        :return: path of the populated target directory
        """
        target_dir = os.path.join(os.path.dirname(__file__), "tools_package")
        # Always start from a clean target directory.
        if os.path.exists(target_dir):
            rmtree(target_dir)
        os.makedirs(target_dir)
        for entry in bom:  # renamed from `file`, which shadowed the builtin
            relative = entry.strip('\n')
            src = os.path.join(root_path, relative)
            dst = os.path.join(target_dir, relative)
            if not os.path.exists(os.path.dirname(dst)):
                os.makedirs(os.path.dirname(dst))
            if os.path.isdir(src):
                copytree(src, dst)
            else:
                copyfile(src, dst)
        return target_dir

    @staticmethod
    def add_version_txt(dst_path, build_number, git_hash_short):
        """Write version.txt (timestamp, version, full git hash) into dst_path.

        :param dst_path: directory that receives version.txt
        :param build_number: build counter embedded in the version string
        :param git_hash_short: short hash to embed, or "0" to ask git for it
        :return: the version string "<major>.<minor>.<build>.<short-hash>"
        """
        git_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip("\n")
        if git_hash_short == "0":
            git_hash_short = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode("utf-8").strip(
                "\n")
        version = "{0}.{1}.{2}.{3}".format(major_version, minor_version, build_number, git_hash_short)
        timestamp = datetime.now().strftime("%I:%M%p %B %d, %Y")
        with open(os.path.join(dst_path, "version.txt"), 'w') as f:
            f.write(timestamp + '\n')
            f.write(version + '\n')
            f.write(git_hash + '\n')
        return version

    @staticmethod
    def make_tarfile(out_file_name, source_dir):
        """Create a gzipped tarball of source_dir at out_file_name.

        The archive is created relative to the current working directory and
        stores source_dir under its basename.
        """
        # BUG FIX: the stale-archive check previously looked next to this
        # script while the archive is written relative to cwd, so an existing
        # archive at the actual output path was never removed.
        if os.path.exists(out_file_name):
            os.remove(out_file_name)
        with tarfile.open(out_file_name, "w:gz") as tar:
            tar.add(source_dir, arcname=os.path.basename(source_dir))
|
alipay/aop/api/domain/AlipayTradeRepaybillModifyModel.py
|
antopen/alipay-sdk-python-all
| 213 |
143480
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayTradeRepaybillModifyModel(object):
    """Request model for the alipay.trade.repaybill.modify API."""

    # Serializable fields, in the order they appear in the wire dict.
    _FIELDS = ('amount', 'bill_no', 'bill_status', 'operation_type', 'out_request_no')

    def __init__(self):
        self._amount = None
        self._bill_no = None
        self._bill_status = None
        self._operation_type = None
        self._out_request_no = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def bill_no(self):
        return self._bill_no

    @bill_no.setter
    def bill_no(self, value):
        self._bill_no = value

    @property
    def bill_status(self):
        return self._bill_status

    @bill_status.setter
    def bill_status(self, value):
        self._bill_status = value

    @property
    def operation_type(self):
        return self._operation_type

    @operation_type.setter
    def operation_type(self, value):
        self._operation_type = value

    @property
    def out_request_no(self):
        return self._out_request_no

    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping falsy fields and recursing
        into values that expose their own to_alipay_dict()."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayTradeRepaybillModifyModel()
        for field in AlipayTradeRepaybillModifyModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
ch7/points_lines.py
|
lyskevin/cpbook-code
| 1,441 |
143518
|
<reponame>lyskevin/cpbook-code
import math
INF = 10**9  # sentinel for an "infinite" (vertical) slope in line2
EPS = 1e-9   # absolute tolerance for floating-point comparisons
def DEG_to_RAD(d):
    """Convert an angle from degrees to radians."""
    return math.pi * d / 180.0

def RAD_to_DEG(r):
    """Convert an angle from radians to degrees."""
    return 180.0 * r / math.pi
class point_i:
    # Integer-coordinate point (no comparison operators defined).
    def __init__(self, x=0, y=0):
        self.x = x  # x coordinate
        self.y = y  # y coordinate
class point:
    """2-D point with tolerance-aware equality.

    Ordering is lexicographic by (x, y); equality holds when math.isclose
    (default relative tolerance) accepts both coordinates.
    """

    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

    def __lt__(self, other):
        # Lexicographic: compare x first, then y.
        return (self.x, self.y) < (other.x, other.y)

    def __eq__(self, other):
        x_close = math.isclose(self.x, other.x)
        y_close = math.isclose(self.y, other.y)
        return x_close and y_close
def dist(p1, p2):
    """Euclidean distance between points p1 and p2."""
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    return math.hypot(dx, dy)
def rotate(p, theta):
    """Rotate p by theta degrees counter-clockwise about the origin."""
    rad = DEG_to_RAD(theta)
    cos_t, sin_t = math.cos(rad), math.sin(rad)
    return point(p.x * cos_t - p.y * sin_t, p.x * sin_t + p.y * cos_t)
class line:
    # Line in implicit form a*x + b*y + c = 0, normalized by pointsToLine so
    # that b is 1.0 (general case) or (a, b) = (1.0, 0.0) for vertical lines.
    def __init__(self):
        self.a = 0
        self.b = 0
        self.c = 0
def pointsToLine(p1, p2, l):
    """Fill l with the implicit line (a*x + b*y + c = 0) through p1 and p2.

    The coefficients are normalized: b == 1.0 in the general case, and
    (a, b) == (1.0, 0.0) for a (near-)vertical line.
    """
    if abs(p1.x - p2.x) < EPS:
        # Vertical line: x = p1.x
        l.a, l.b, l.c = 1.0, 0.0, -p1.x
        return
    slope = -(p1.y - p2.y) / (p1.x - p2.x)
    l.a = slope
    l.b = 1.0
    l.c = -(slope * p1.x) - p1.y
class line2:
    # Alternative slope-intercept form y = m*x + c; m == INF marks a vertical
    # line, in which case c stores the x-intercept (see pointsToLine2).
    def __init__(self):
        self.m = 0
        self.c = 0
def pointsToLine2(p1, p2, l):
    """Fill l (slope-intercept form) with the line through p1 and p2.

    Returns 0 for a vertical line (l.m = INF, l.c = x-intercept) and 1
    otherwise.
    """
    if p1.x == p2.x:
        # Vertical: the slope is undefined; store the x coordinate in c.
        l.m = INF
        l.c = p1.x
        return 0
    l.m = (p1.y - p2.y) / (p1.x - p2.x)
    l.c = p1.y - l.m * p1.x
    return 1
def areParallel(l1, l2):
    """True when the two implicit lines share the same (a, b) coefficients."""
    return math.isclose(l1.a, l2.a) and math.isclose(l1.b, l2.b)

def areSame(l1, l2):
    """True when the two implicit lines coincide."""
    return areParallel(l1, l2) and math.isclose(l1.c, l2.c)

def areIntersect(l1, l2, p):
    """Intersect l1 and l2, storing the point in p.

    Returns True on success; returns False (leaving p untouched) when the
    lines are parallel.
    """
    if areParallel(l1, l2):
        return False
    p.x = (l2.b * l1.c - l1.b * l2.c) / (l2.a * l1.b - l1.a * l2.b)
    # Solve for y using whichever line has a non-zero b coefficient.
    if math.isclose(l1.b, 0.0):
        p.y = -(l2.a * p.x + l2.c)
    else:
        p.y = -(l1.a * p.x + l1.c)
    return True
class vec:
    """Free 2-D vector."""

    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

def toVec(a, b):
    """Vector pointing from point a to point b."""
    return vec(b.x - a.x, b.y - a.y)

def scale(v, s):
    """Return a new vector equal to v scaled by the factor s."""
    return vec(s * v.x, s * v.y)

def translate(p, v):
    """Return a new point equal to p translated by the vector v."""
    return point(p.x + v.x, p.y + v.y)
def pointSlopeToLine(p, m, l):
    """Fill l with the line of slope m through point p (b normalized to 1)."""
    l.a = -m
    l.b = 1
    # Pick c so the line passes through p: a*p.x + b*p.y + c = 0.
    l.c = -((l.a * p.x) + (l.b * p.y))
def closestPoint(l, p, ans):
    # Store in `ans` the point on line l closest to p.
    if math.isclose(l.b, 0.0):
        # Vertical line x = -c: project horizontally.
        ans.x, ans.y = -l.c, p.y
        return
    if math.isclose(l.a, 0.0):
        # Horizontal line y = -c: project vertically.
        ans.x, ans.y = p.x, -l.c
        return
    # General case: intersect l with the perpendicular line through p
    # (perpendicular slope = 1/l.a since l has slope -l.a with b == 1).
    perpendicular = line()
    pointSlopeToLine(p, 1.0/l.a, perpendicular)
    areIntersect(l, perpendicular, ans)
def reflectionPoint(l, p, ans):
    # Store in `ans` the mirror image of p across line l: translate p by
    # twice the vector from p to its projection on l.
    b = point()
    closestPoint(l, p, b)
    v = toVec(p, b)
    ans.x, ans.y = p.x + 2 * v.x, p.y + 2 * v.y
def dot(a, b):
    """Dot product of vectors a and b."""
    return a.x * b.x + a.y * b.y

def norm_sq(v):
    """Squared Euclidean length of vector v."""
    return dot(v, v)
def angle(a, o, b):
    # Angle a-o-b at vertex o, in radians, via the dot-product identity
    # cos(theta) = (oa . ob) / (|oa| * |ob|).
    oa = toVec(o, a)
    ob = toVec(o, b)
    return math.acos(dot(oa, ob) / math.sqrt(norm_sq(oa) * norm_sq(ob)))
def distToLine(p, a, b, c):
    # Distance from p to the infinite line through a and b; the projection
    # of p onto the line is stored in c.  u is the normalized position of
    # the projection along the a->b vector.
    ap = toVec(a, p)
    ab = toVec(a, b)
    u = dot(ap, ab) / norm_sq(ab)
    s = scale(ab, u)
    c.x, c.y = a.x+s.x, a.y+s.y
    return dist(p, c)
def distToLineSegment(p, a, b, c):
    # Distance from p to the segment a-b; the closest segment point is
    # stored in c.
    ap = toVec(a, p)
    ab = toVec(a, b)
    u = dot(ap, ab) / norm_sq(ab)
    if u < 0.0:
        # Projection falls before a: clamp to endpoint a.
        c.x, c.y = a.x, a.y
        return dist(p, a)
    if u > 1.0:
        # Projection falls past b: clamp to endpoint b.
        c.x, c.y = b.x, b.y
        return dist(p, b)
    return distToLine(p, a, b, c)
def cross(a, b):
    """2-D cross product (z component) of vectors a and b."""
    return a.x * b.y - b.x * a.y

def ccw(p, q, r):
    """True when p->q->r makes a counter-clockwise (left) turn, within EPS."""
    return cross(toVec(p, q), toVec(p, r)) > -EPS

def collinear(p, q, r):
    """True when p, q and r lie on a single line, within EPS."""
    return abs(cross(toVec(p, q), toVec(p, r))) < EPS
# Demo / smoke test exercising the geometry routines above.
if __name__ == '__main__':
    # Sorting with tolerance-aware __eq__ but exact __lt__.
    P = [point(2e-9, 0), point(0, 2), point(1e-9, 1)]
    P = sorted(P)
    for pt in P:
        print('%.9lf, %.9lf' % (pt.x, pt.y))
    P1 = point()
    P2 = point()
    P3 = point(0, 1)
    print('%d' % (P1 == P2))
    print('%d' % (P1 == P3))
    P = [point(2, 2), point(4, 3), point(2, 4), point(6, 6), point(2, 6), point(6, 5)]
    P = sorted(P)
    for p in P:
        print('(%.2lf, %.2lf)' % (p.x, p.y))
    # Lines through pairs of points; parallel / identity / intersection tests.
    P = [point(2, 2), point(4, 3), point(2, 4), point(6, 6), point(2, 6), point(6, 5), point(8, 6)]
    d = dist(P[0], P[5])
    print('Euclidean distance between P[0] and P[5] = %.2lf' % d)
    l1 = line()
    l2 = line()
    l3 = line()
    l4 = line()
    pointsToLine(P[0], P[1], l1)
    print('%.2lf * x + %.2lf * y + %.2lf = 0.00' % (l1.a, l1.b, l1.c))
    pointsToLine(P[0], P[2], l2);
    print('%.2lf * x + %.2lf * y + %.2lf = 0.00' % (l2.a, l2.b, l2.c))
    pointsToLine(P[2], P[3], l3)
    print('l1 & l2 are parallel? %d' % areParallel(l1, l2))
    print('l1 & l3 are parallel? %d' % areParallel(l1, l3))
    pointsToLine(P[2], P[4], l4)
    print('l1 & l2 are the same? %d' % areSame(l1, l2))
    print('l2 & l4 are the same? %d' % areSame(l2, l4))
    p12 = point()
    res = areIntersect(l1, l2, p12)
    print('l1 & l2 are intersect? %d, at (%.2lf, %.2lf)' % (res, p12.x, p12.y))
    # Point-to-line and point-to-segment distances and projections.
    ans = point()
    d = distToLine(P[0], P[2], P[3], ans)
    print('Closest point from P[0] to line (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    closestPoint(l3, P[0], ans)
    print('Closest point from P[0] to line V2 (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, dist(P[0], ans)))
    d = distToLineSegment(P[0], P[2], P[3], ans)
    print('Closest point from P[0] to line SEGMENT (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    d = distToLineSegment(P[1], P[2], P[3], ans)
    print('Closest point from P[1] to line SEGMENT (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    d = distToLineSegment(P[6], P[2], P[3], ans)
    print('Closest point from P[6] to line SEGMENT (P[2]-P[3]): (%.2lf, %.2lf), dist = %.2lf' % (ans.x, ans.y, d))
    reflectionPoint(l4, P[1], ans)
    print('Reflection point from P[1] to line (P[2]-P[4]): (%.2lf, %.2lf)' % (ans.x, ans.y))
    # Angles, orientation (ccw) and collinearity checks.
    print('Angle P[0]-P[4]-P[3] = %.2lf' % RAD_to_DEG(angle(P[0], P[4], P[3])))
    print('Angle P[0]-P[2]-P[1] = %.2lf' % RAD_to_DEG(angle(P[0], P[2], P[1])))
    print('Angle P[4]-P[3]-P[6] = %.2lf' % RAD_to_DEG(angle(P[4], P[3], P[6])))
    print('P[0], P[2], P[3] form A left turn? %d' % ccw(P[0], P[2], P[3]))
    print('P[0], P[3], P[2] form A left turn? %d' % ccw(P[0], P[3], P[2]))
    print('P[0], P[2], P[3] are collinear? %d' % collinear(P[0], P[2], P[3]))
    print('P[0], P[2], P[4] are collinear? %d' % collinear(P[0], P[2], P[4]))
    p = point(3, 7)
    q = point(11, 13)
    r = point(35, 30)
    print('r is on the %s of line p-q (direction p->q)' % ('left' if ccw(p, q, r) else 'right'))
    # Vector translation, scaling and rotation demos.
    A = point(2.0, 2.0)
    B = point(4.0, 3.0)
    v = toVec(A, B)
    C = point(3.0, 2.0)
    D = translate(C, v)
    print('D = (%.2lf, %.2lf)' % (D.x, D.y))
    E = translate(C, scale(v, 0.5))
    print('E = (%.2lf, %.2lf)' % (E.x, E.y))
    print('B = (%.2lf, %.2lf)' % (B.x, B.y))
    F = rotate(B, 90)
    print('F = (%.2lf, %.2lf)' % (F.x, F.y))
    G = rotate(B, 180)
    print('G = (%.2lf, %.2lf)' % (G.x, G.y))
|
python/py-gdbm/files/setup.py
|
davidlrichmond/macports-ports
| 1,199 |
143529
|
<gh_stars>1000+
# NOTE: Python 2 script (comma-style raise); it targets the Python 2 distutils.
try:
    import distutils
    from distutils import sysconfig
    from distutils.command.install import install
    from distutils.core import setup, Extension
except:
    raise SystemExit, "Distutils problem"
# __PREFIX__ and __VERSION__ are placeholders substituted by the MacPorts
# portfile at build time.
prefix = "__PREFIX__"
inc_dirs = [prefix + "/include"]
lib_dirs = [prefix + "/lib"]
libs = ["gdbm"]
# Build the C extension module wrapping GNU dbm.
setup(name = "gdbm",
      version = "__VERSION__",
      description = "GDBM Extension to Python",
      ext_modules = [Extension('gdbm', ['gdbmmodule.c'],
                               include_dirs = inc_dirs,
                               libraries = libs,
                               library_dirs = lib_dirs)]
      )
|
examples/mnist/mnist_util.py
|
fyumoto/RBMs
| 124 |
143540
|
<filename>examples/mnist/mnist_util.py<gh_stars>100-1000
import os
import numpy as np
import pandas
from math import sqrt
from paysage import samplers, schedules, batch
from paysage import backends as be
# import the plotting module using the absolute path
from importlib import util
filename = os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), "plotting.py")
spec = util.spec_from_file_location("plotting", location=filename)
plotting = util.module_from_spec(spec)
spec.loader.exec_module(plotting)
# ----- DEFAULT PATHS ----- #
def default_paths(file = "shuffled"):
    """Return the path to the shuffled MNIST HDF5 file, creating it if needed.

    Args:
        file (str): key into the `files` table below; only "shuffled"
            is currently defined.

    Returns:
        str: absolute path to the shuffled dataset, located next to this script.

    Raises:
        IOError: if the raw MNIST file has not been downloaded yet.
    """
    files = {"shuffled": {"input": "mnist.h5", "output": "shuffled_mnist.h5"},
             }
    file_path = os.path.abspath(__file__)
    mnist_path = os.path.join(os.path.dirname(file_path), files[file]["input"])
    if not os.path.exists(mnist_path):
        raise IOError("{} does not exist. run download_mnist.py to fetch from the web"
                      .format(mnist_path))
    shuffled_path = os.path.join(os.path.dirname(file_path), files[file]["output"])
    if not os.path.exists(shuffled_path):
        print("Shuffled file does not exist, creating a shuffled dataset.")
        # Shuffle once and cache the result on disk for later runs.
        shuffler = batch.DataShuffler(mnist_path, shuffled_path, complevel=0)
        shuffler.shuffle()
    return shuffled_path
# ----- DATA MANIPULATION ----- #
def create_batch(batch_size, train_fraction=0.95, transform=be.do_nothing):
    """
    Create a Batch reader over the shuffled MNIST training images.

    Args:
        batch_size (int): number of samples per batch.
        train_fraction (float): the training data fraction.
        transform (callable): the transform function.

    Returns:
        data (Batch): a batcher.
    """
    frame = pandas.read_hdf(default_paths(), key='train/images')
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # documented replacement (available since pandas 0.24).
    samples = be.float_tensor(frame.to_numpy())
    return batch.in_memory_batch(samples, batch_size, train_fraction, transform)
# ----- CHECK MODEL ----- #
def example_plot(grid, show_plot, dim=28, vmin=0, vmax=1, cmap=plotting.cm.gray):
    """Convert a grid of backend tensors to numpy and optionally render it
    as a grid of (dim x dim) images."""
    as_numpy = be.to_numpy_array(grid)
    if not show_plot:
        return
    plotting.plot_image_grid(as_numpy, (dim, dim), vmin, vmax, cmap=cmap)
def show_metrics(rbm, performance, show_plot=True):
    # Plot the training metrics collected in `performance`. `rbm` is unused;
    # it is kept so all show_* helpers in this module share a uniform signature.
    performance.plot_metrics(show=show_plot)
def compute_reconstructions(rbm, v_data, n_recon=10, vertical=False, num_to_avg=1):
    """Pair a random sample of data rows with their RBM reconstructions.

    Returns a numpy grid of [data, reconstruction] pairs: shape
    (n_recon, 2, ...) when `vertical` is True, else (2, n_recon, ...).
    Reconstructions are averaged over `num_to_avg` attempts.
    """
    v_model = be.zeros_like(v_data)
    # Average over n reconstruction attempts
    for k in range(num_to_avg):
        reconstructions = rbm.compute_reconstructions(v_data)
        v_model += reconstructions.get_visible() / num_to_avg
    # Pick n_recon random rows to display.
    idx = np.random.choice(range(len(v_model)), n_recon, replace=False)
    grid = np.array([[be.to_numpy_array(v_data[i]),
                      be.to_numpy_array(v_model[i])] for i in idx])
    if vertical:
        return grid
    else:
        return grid.swapaxes(0,1)
def show_reconstructions(rbm, v_data, show_plot, dim=28, n_recon=10,
                         vertical=False, num_to_avg=1):
    """Plot a random sample of data/reconstruction pairs from the RBM."""
    print("\nPlot a random sample of reconstructions")
    grid = compute_reconstructions(rbm, v_data, n_recon, vertical, num_to_avg)
    example_plot(grid, show_plot, dim=dim)
def compute_fantasy_particles(rbm, n_fantasy=5, fantasy_steps=100, beta_std=0.6,
                              run_mean_field=True):
    """Sample n_fantasy**2 fantasy particles from the model.

    Returns a numpy array reshaped to (n_fantasy, n_fantasy, -1) for
    display as a square grid of images.
    """
    # Linear schedule over fantasy_steps -- presumably annealing the inverse
    # temperature from 1.0; confirm against paysage.schedules.Linear.
    schedule = schedules.Linear(initial=1.0, delta = 1 / (fantasy_steps-1))
    fantasy = samplers.SequentialMC.generate_fantasy_state(rbm,
                                                           n_fantasy*n_fantasy,
                                                           fantasy_steps,
                                                           schedule=schedule,
                                                           beta_std=beta_std,
                                                           beta_momentum=0.0)
    if run_mean_field:
        # One mean-field pass smooths the sampled particles.
        fantasy = rbm.mean_field_iteration(1, fantasy)
    v_model = fantasy[0]
    grid = np.array([be.to_numpy_array(v) for v in v_model])
    return grid.reshape(n_fantasy, n_fantasy, -1)
def show_fantasy_particles(rbm, v_data, show_plot, dim=28, n_fantasy=5,
                           fantasy_steps=100, beta_std=0.6, run_mean_field=True):
    """Plot a grid of fantasy particles sampled from the model.

    `v_data` is unused; the signature mirrors the other show_* helpers.
    """
    print("\nPlot a random sample of fantasy particles")
    grid = compute_fantasy_particles(rbm, n_fantasy, fantasy_steps,
                                     beta_std=beta_std,
                                     run_mean_field=run_mean_field)
    example_plot(grid, show_plot, dim=dim)
def compute_weights(rbm, n_weights=25, l=0, random=True):
    """Collect receptive fields of layer `l` units as a square grid.

    The weight matrices from the visible layer up through layer `l` are
    multiplied together so deeper units are visualized in input space.
    Returns an array of shape (grid_size, grid_size, -1).
    """
    # can't sample more than what we've got
    n_weights = min(n_weights, rbm.connections[l].shape[1])
    # floor to the nearest square below
    grid_size = int(sqrt(n_weights))
    n_weights = grid_size**2
    if random:
        idx = np.random.choice(range(rbm.connections[l].shape[1]),
                               n_weights, replace=False)
    else:
        idx = np.arange(n_weights)
    # Chain weight matrices down to the visible layer.
    wprod = rbm.connections[0].weights.W()
    for i in range(1,l+1):
        wprod = be.dot(wprod, rbm.connections[i].weights.W())
    grid = np.array([be.to_numpy_array(wprod[:, i])
                     for i in idx])
    return grid.reshape(grid_size, grid_size, -1)
def show_weights(rbm, show_plot, dim=28, n_weights=25, random=True):
    """Plot a sample of receptive fields for every weight layer of the model.

    Args:
        rbm: model whose connection weights are visualized.
        show_plot (bool): whether to actually render the plots.
        dim (int): image side length used to reshape each receptive field.
        n_weights (int): number of receptive fields to sample per layer.
        random (bool): sample randomly instead of taking the first fields.
    """
    print("\nPlot a random sample of the weights")
    for l in range(rbm.num_connections):
        grid = compute_weights(rbm, n_weights, l=l, random=random)
        # normalize the grid between -1 and +1; guard against an all-zero
        # weight grid, which would otherwise produce 0/0 -> NaN images
        maxval = np.max(np.abs(grid))
        if maxval > 0:
            grid /= maxval
        example_plot(grid, show_plot, dim=dim, vmin=-1, vmax=+1,
                     cmap=plotting.cm.bwr)
def weight_norm_histogram(rbm, show_plot=False, filename=None):
    """Plot a histogram of per-unit weight-column norms for each layer.

    Norms are divided by sqrt(fan-in) so layers of different widths are
    comparable on one axis. Optionally shows and/or saves the figure.
    """
    # Imported lazily so the module can be used without matplotlib/seaborn.
    import matplotlib.pyplot as plt
    import seaborn as sns

    fig, ax = plt.subplots()
    for l in range(rbm.num_connections):
        num_inputs = rbm.connections[l].shape[0]
        norm = be.to_numpy_array(be.norm(rbm.connections[l].weights.W(), axis=0) / sqrt(num_inputs))
        sns.distplot(norm, ax=ax, label=str(l))
    ax.legend()

    if show_plot:
        plt.show(fig)
    if filename is not None:
        fig.savefig(filename)
    plt.close(fig)
|
release/stubs.min/System/Drawing/__init___parts/Color.py
|
htlcnn/ironpython-stubs
| 182 |
143555
|
<reponame>htlcnn/ironpython-stubs
class Color(object):
""" Represents an ARGB (alpha,red,green,blue) color. """
def Equals(self,obj):
"""
Equals(self: Color,obj: object) -> bool
Tests whether the specified object is a System.Drawing.Color structure and is equivalent to this
System.Drawing.Color structure.
obj: The object to test.
Returns: true if obj is a System.Drawing.Color structure equivalent to this System.Drawing.Color
structure; otherwise,false.
"""
pass
@staticmethod
def FromArgb(*__args):
"""
FromArgb(alpha: int,baseColor: Color) -> Color
Creates a System.Drawing.Color structure from the specified System.Drawing.Color structure,but
with the new specified alpha value. Although this method allows a 32-bit value to be passed for
the alpha value,the value is limited to 8 bits.
alpha: The alpha value for the new System.Drawing.Color. Valid values are 0 through 255.
baseColor: The System.Drawing.Color from which to create the new System.Drawing.Color.
Returns: The System.Drawing.Color that this method creates.
FromArgb(red: int,green: int,blue: int) -> Color
Creates a System.Drawing.Color structure from the specified 8-bit color values (red,green,and
blue). The alpha value is implicitly 255 (fully opaque). Although this method allows a 32-bit
value to be passed for each color component,the value of each component is limited to 8 bits.
red: The red component value for the new System.Drawing.Color. Valid values are 0 through 255.
green: The green component value for the new System.Drawing.Color. Valid values are 0 through 255.
blue: The blue component value for the new System.Drawing.Color. Valid values are 0 through 255.
Returns: The System.Drawing.Color that this method creates.
FromArgb(argb: int) -> Color
Creates a System.Drawing.Color structure from a 32-bit ARGB value.
argb: A value specifying the 32-bit ARGB value.
Returns: The System.Drawing.Color structure that this method creates.
FromArgb(alpha: int,red: int,green: int,blue: int) -> Color
Creates a System.Drawing.Color structure from the four ARGB component (alpha,red,green,and
blue) values. Although this method allows a 32-bit value to be passed for each component,the
value of each component is limited to 8 bits.
alpha: The alpha component. Valid values are 0 through 255.
red: The red component. Valid values are 0 through 255.
green: The green component. Valid values are 0 through 255.
blue: The blue component. Valid values are 0 through 255.
Returns: The System.Drawing.Color that this method creates.
"""
pass
@staticmethod
def FromKnownColor(color):
"""
FromKnownColor(color: KnownColor) -> Color
Creates a System.Drawing.Color structure from the specified predefined color.
color: An element of the System.Drawing.KnownColor enumeration.
Returns: The System.Drawing.Color that this method creates.
"""
pass
@staticmethod
def FromName(name):
"""
FromName(name: str) -> Color
Creates a System.Drawing.Color structure from the specified name of a predefined color.
name: A string that is the name of a predefined color. Valid names are the same as the names of the
elements of the System.Drawing.KnownColor enumeration.
Returns: The System.Drawing.Color that this method creates.
"""
pass
def GetBrightness(self):
"""
GetBrightness(self: Color) -> Single
Gets the hue-saturation-brightness (HSB) brightness value for this System.Drawing.Color
structure.
Returns: The brightness of this System.Drawing.Color. The brightness ranges from 0.0 through 1.0,where
0.0 represents black and 1.0 represents white.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: Color) -> int
Returns a hash code for this System.Drawing.Color structure.
Returns: An integer value that specifies the hash code for this System.Drawing.Color.
"""
pass
def GetHue(self):
"""
GetHue(self: Color) -> Single
Gets the hue-saturation-brightness (HSB) hue value,in degrees,for this System.Drawing.Color
structure.
Returns: The hue,in degrees,of this System.Drawing.Color. The hue is measured in degrees,ranging from
0.0 through 360.0,in HSB color space.
"""
pass
def GetSaturation(self):
"""
GetSaturation(self: Color) -> Single
Gets the hue-saturation-brightness (HSB) saturation value for this System.Drawing.Color
structure.
Returns: The saturation of this System.Drawing.Color. The saturation ranges from 0.0 through 1.0,where
0.0 is grayscale and 1.0 is the most saturated.
"""
pass
def ToArgb(self):
"""
ToArgb(self: Color) -> int
Gets the 32-bit ARGB value of this System.Drawing.Color structure.
Returns: The 32-bit ARGB value of this System.Drawing.Color.
"""
pass
def ToKnownColor(self):
"""
ToKnownColor(self: Color) -> KnownColor
Gets the System.Drawing.KnownColor value of this System.Drawing.Color structure.
Returns: An element of the System.Drawing.KnownColor enumeration,if the System.Drawing.Color is created
from a predefined color by using either the System.Drawing.Color.FromName(System.String) method
or the System.Drawing.Color.FromKnownColor(System.Drawing.KnownColor) method; otherwise,0.
"""
pass
def ToString(self):
"""
ToString(self: Color) -> str
Converts this System.Drawing.Color structure to a human-readable string.
Returns: A string that is the name of this System.Drawing.Color,if the System.Drawing.Color is created
from a predefined color by using either the System.Drawing.Color.FromName(System.String) method
or the System.Drawing.Color.FromKnownColor(System.Drawing.KnownColor) method; otherwise,a
string that consists of the ARGB component names and their values.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __ne__(self,*args):
pass
A=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the alpha component value of this System.Drawing.Color structure.
Get: A(self: Color) -> Byte
"""
B=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the blue component value of this System.Drawing.Color structure.
Get: B(self: Color) -> Byte
"""
G=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the green component value of this System.Drawing.Color structure.
Get: G(self: Color) -> Byte
"""
IsEmpty=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether this System.Drawing.Color structure is uninitialized.
Get: IsEmpty(self: Color) -> bool
"""
IsKnownColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether this System.Drawing.Color structure is a predefined color. Predefined colors are represented by the elements of the System.Drawing.KnownColor enumeration.
Get: IsKnownColor(self: Color) -> bool
"""
IsNamedColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether this System.Drawing.Color structure is a named color or a member of the System.Drawing.KnownColor enumeration.
Get: IsNamedColor(self: Color) -> bool
"""
IsSystemColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether this System.Drawing.Color structure is a system color. A system color is a color that is used in a Windows display element. System colors are represented by elements of the System.Drawing.KnownColor enumeration.
Get: IsSystemColor(self: Color) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of this System.Drawing.Color.
Get: Name(self: Color) -> str
"""
R=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the red component value of this System.Drawing.Color structure.
Get: R(self: Color) -> Byte
"""
AliceBlue=None
AntiqueWhite=None
Aqua=None
Aquamarine=None
Azure=None
Beige=None
Bisque=None
Black=None
BlanchedAlmond=None
Blue=None
BlueViolet=None
Brown=None
BurlyWood=None
CadetBlue=None
Chartreuse=None
Chocolate=None
Coral=None
CornflowerBlue=None
Cornsilk=None
Crimson=None
Cyan=None
DarkBlue=None
DarkCyan=None
DarkGoldenrod=None
DarkGray=None
DarkGreen=None
DarkKhaki=None
DarkMagenta=None
DarkOliveGreen=None
DarkOrange=None
DarkOrchid=None
DarkRed=None
DarkSalmon=None
DarkSeaGreen=None
DarkSlateBlue=None
DarkSlateGray=None
DarkTurquoise=None
DarkViolet=None
DeepPink=None
DeepSkyBlue=None
DimGray=None
DodgerBlue=None
Empty=None
Firebrick=None
FloralWhite=None
ForestGreen=None
Fuchsia=None
Gainsboro=None
GhostWhite=None
Gold=None
Goldenrod=None
Gray=None
Green=None
GreenYellow=None
Honeydew=None
HotPink=None
IndianRed=None
Indigo=None
Ivory=None
Khaki=None
Lavender=None
LavenderBlush=None
LawnGreen=None
LemonChiffon=None
LightBlue=None
LightCoral=None
LightCyan=None
LightGoldenrodYellow=None
LightGray=None
LightGreen=None
LightPink=None
LightSalmon=None
LightSeaGreen=None
LightSkyBlue=None
LightSlateGray=None
LightSteelBlue=None
LightYellow=None
Lime=None
LimeGreen=None
Linen=None
Magenta=None
Maroon=None
MediumAquamarine=None
MediumBlue=None
MediumOrchid=None
MediumPurple=None
MediumSeaGreen=None
MediumSlateBlue=None
MediumSpringGreen=None
MediumTurquoise=None
MediumVioletRed=None
MidnightBlue=None
MintCream=None
MistyRose=None
Moccasin=None
NavajoWhite=None
Navy=None
OldLace=None
Olive=None
OliveDrab=None
Orange=None
OrangeRed=None
Orchid=None
PaleGoldenrod=None
PaleGreen=None
PaleTurquoise=None
PaleVioletRed=None
PapayaWhip=None
PeachPuff=None
Peru=None
Pink=None
Plum=None
PowderBlue=None
Purple=None
Red=None
RosyBrown=None
RoyalBlue=None
SaddleBrown=None
Salmon=None
SandyBrown=None
SeaGreen=None
SeaShell=None
Sienna=None
Silver=None
SkyBlue=None
SlateBlue=None
SlateGray=None
Snow=None
SpringGreen=None
SteelBlue=None
Tan=None
Teal=None
Thistle=None
Tomato=None
Transparent=None
Turquoise=None
Violet=None
Wheat=None
White=None
WhiteSmoke=None
Yellow=None
YellowGreen=None
|
openelex/tasks/validate.py
|
Mpopoma/oe-core
| 156 |
143557
|
<filename>openelex/tasks/validate.py
from __future__ import print_function
import sys
from collections import OrderedDict
import click
from .utils import load_module, split_args
@click.command(name='validate.list', help="Show available validations for state")
@click.option('--state', required=True, help="Two-letter state-abbreviation, e.g. NY")
def list(state):
    """
    Show available validations for state.

    NOTE(review): this function shadows the builtin `list` at module
    level; other code in this module must not call `list(...)` expecting
    the builtin (doing so would invoke this click command).
    """
    state_mod = load_module(state, ['validate'])
    print("\nAvailable validators:\n")
    # Validators are discovered by naming convention: validate_* functions.
    for name in dir(state_mod.validate):
        if name.startswith('validate_'):
            func = getattr(state_mod.validate, name)
            out = "\t%s" % name
            if func.__doc__:
                out += "\n\t\t %s" % func.__doc__
            print(out + "\n")
@click.command(name='validate.run', help="Run data validations for state")
@click.option('--state', required=True, help="Two-letter state-abbreviation, e.g. NY")
@click.option('--include', help="Validations to run (comma-separated list)")
@click.option('--exclude', help="Validations to skip (comma-separated list)")
def run(state, include=None, exclude=None):
    """
    Run data validations for state.

    State is required. Optionally filter validations using include/exclude flags.
    """
    if include and exclude:
        sys.exit("ERROR: You can not use both include and exclude flags!")
    state_mod = load_module(state, ['validate'])
    # Load all validations in order found
    validations = OrderedDict()
    for name in dir(state_mod.validate):
        if name.startswith('validate_'):
            validations[name] = getattr(state_mod.validate, name)
    # Filter validations based on include/exclude flags.
    # BUGFIX: the original popped entries from the dict while iterating it,
    # which raises "dictionary changed size during iteration"; iterate a
    # snapshot of the keys instead.
    if include:
        to_run = split_args(include)
        for val in tuple(validations):
            if val not in to_run:
                validations.pop(val)
    if exclude:
        to_skip = split_args(exclude)
        for val in tuple(validations):
            if val in to_skip:
                validations.pop(val)
    # Run remaining validations.
    # BUGFIX: `list(...)` here resolved to the click `list` command defined
    # above (it shadows the builtin), so avoid calling `list` entirely.
    run_validation(state, tuple(validations.values()))
def run_validation(state, validators):
    """Run each validator and print a pass/fail summary.

    Args:
        state: two-letter state abbreviation, used in failure messages.
        validators: iterable of zero-argument validation callables.

    Returns:
        Tuple (passed, failed): names of passing validators and formatted
        failure messages. (Previously returned None; existing callers that
        ignore the return value are unaffected.)
    """
    passed = []
    failed = []
    print()
    for validator in validators:
        try:
            validator()
            passed.append(validator.__name__)
        except Exception as e:
            # Any exception counts as a validation failure; record enough
            # context (state, validator, message) to diagnose it later.
            failed.append("Error: %s - %s - %s" %
                          (state.upper(), validator.__name__, e))
    print("\n\nVALIDATION RESULTS")
    print("Passed: %s" % len(passed))
    print("Failed: %s" % len(failed))
    for fail in failed:
        print("\t%s" % fail)
    print()
    return passed, failed
|
tools/test_bundled_libs.py
|
cyyever/DALI
| 3,967 |
143560
|
#!/usr/bin/env python
# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sys import argv
import subprocess
# Check if any element in the elms list matches the value
def get_list_elm_match(value, elms):
    """Return True when at least one entry of elms is a substring of value."""
    for elm in elms:
        if elm in value:
            return True
    return False
def check_ldd_out(lib, linked_lib, bundled_lib_names, allowed_libs):
    """Return True if linked_lib is either bundled or whitelisted for lib.

    Whitelist entries from allowed_libs apply when their key is a substring
    of lib's name; the empty key applies to every library.
    """
    whitelist = []
    for key, names in allowed_libs.items():
        if key in lib:
            whitelist.extend(names)
    if linked_lib in bundled_lib_names:
        return True
    return any(entry in linked_lib for entry in whitelist)
def main():
    """Verify that every library passed on argv links only against bundled
    or explicitly whitelisted shared objects; exit(1) on the first offender."""
    # Shared objects any bundled library may link against without being
    # re-bundled. The "" key applies to every inspected library.
    allowed_libs = {"": ["linux-vdso.so.1",
                         "libm.so.6",
                         "libpthread.so.0",
                         "libc.so.6",
                         "/lib64/ld-linux",
                         "/lib/ld-linux",
                         "libdl.so.2",
                         "librt.so.1",
                         "libstdc++.so.6",
                         "libgcc_s.so.1",
                         "libasan.so",
                         "liblsan.so",
                         "libubsan.so",
                         "libtsan.so"
                         ]}
    bundled_libs = argv[1:]
    # Strip directory components to get bare library names.
    bundled_lib_names = [lib.rsplit('/', 1)[-1] for lib in bundled_libs]
    print("Checking bundled libs linkage:")
    for lib_path, lib_name in zip(bundled_libs, bundled_lib_names):
        print("- " + lib_name)
        # subprocess.run waits for ldd and (check=True) fails loudly if ldd
        # itself errors -- the original Popen never checked the exit status.
        ldd = subprocess.run(["ldd", lib_path], stdout=subprocess.PIPE, check=True)
        for line in ldd.stdout.decode().splitlines():
            line = line.strip()
            if not line:
                continue  # defensively skip blank lines in ldd output
            linked_lib = line.split()[0]
            if not check_ldd_out(lib_name, linked_lib, bundled_lib_names, allowed_libs):
                print('Library: "' + linked_lib + '" should be bundled in whl or removed from the dynamic link dependency')
                # raise SystemExit rather than the site-provided exit(),
                # which is not guaranteed to exist in every environment
                raise SystemExit(1)
        print("-> OK")
if __name__ == '__main__':
main()
|
extraPackages/matplotlib-3.0.3/examples/text_labels_and_annotations/mathtext_demo.py
|
dolboBobo/python3_ios
| 130 |
143602
|
<filename>extraPackages/matplotlib-3.0.3/examples/text_labels_and_annotations/mathtext_demo.py<gh_stars>100-1000
"""
=============
Mathtext Demo
=============
Use Matplotlib's internal LaTeX parser and layout engine. For true LaTeX
rendering, see the text.usetex option.
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([1, 2, 3], 'r', label=r'$\sqrt{x^2}$')
ax.legend()
ax.set_xlabel(r'$\Delta_i^j$', fontsize=20)
ax.set_ylabel(r'$\Delta_{i+1}^j$', fontsize=20)
ax.set_title(r'$\Delta_i^j \hspace{0.4} \mathrm{versus} \hspace{0.4} '
r'\Delta_{i+1}^j$', fontsize=20)
tex = r'$\mathcal{R}\prod_{i=\alpha_{i+1}}^\infty a_i\sin(2 \pi f x_i)$'
ax.text(1, 1.6, tex, fontsize=20, va='bottom')
fig.tight_layout()
plt.show()
|
test/integration/test_local_job_cancellation.py
|
quacksawbones/galaxy-1
| 1,085 |
143611
|
<reponame>quacksawbones/galaxy-1
"""Integration test for the local job runner and cancelling jobs via API."""
import time
import psutil
from galaxy_test.base.populators import (
DatasetPopulator,
)
from galaxy_test.driver import integration_util
class CancelsJob:
    """Mixin with helpers for launching a long-running tool job and waiting
    for it to start, shared by the job-cancellation test cases below."""

    def _setup_cat_data_and_sleep(self, history_id):
        # Launch the cat_data_and_sleep test tool with a 4-minute sleep so
        # the job is reliably still running when a test cancels it.
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        running_inputs = {
            "input1": {"src": "hda", "id": hda1["id"]},
            "sleep_time": 240,
        }
        running_response = self.dataset_populator.run_tool(
            "cat_data_and_sleep",
            running_inputs,
            history_id,
        )
        job_dict = running_response["jobs"][0]
        return job_dict["id"]

    def _wait_for_job_running(self, job_id):
        # NOTE(review): assumes galaxy_interactor.wait_for polls until the
        # callable returns falsy, i.e. until state == 'running' -- confirm.
        self.galaxy_interactor.wait_for(lambda: self._get("jobs/%s" % job_id).json()['state'] != 'running',
                                        what="Wait for job to start running",
                                        maxseconds=60)
class LocalJobCancellationTestCase(CancelsJob, integration_util.IntegrationTestCase):
    """Integration tests covering both cancellation paths for local jobs:
    the admin controller's direct DB state change and the jobs API."""
    framework_tool_and_types = True

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)

    def test_cancel_job_with_admin_message(self):
        """Cancel a running job the way the admin controller does."""
        with self.dataset_populator.test_history() as history_id:
            job_id = self._setup_cat_data_and_sleep(history_id)
            self._wait_for_job_running(job_id)
            app = self._app
            sa_session = app.model.context.current
            Job = app.model.Job
            job = sa_session.query(Job).filter_by(tool_id="cat_data_and_sleep").order_by(Job.create_time.desc()).first()
            # This is how the admin controller code cancels a job
            job.job_stderr = 'admin cancelled job'
            job.set_state(app.model.Job.states.DELETED_NEW)
            sa_session.add(job)
            sa_session.flush()
            # NOTE(review): assumes wait_for polls until the lambda is falsy,
            # i.e. until the job state becomes 'error' -- confirm semantics.
            self.galaxy_interactor.wait_for(lambda: self._get("jobs/%s" % job_id).json()['state'] != 'error',
                                            what="Wait for job to end in error",
                                            maxseconds=60)

    def test_kill_process(self):
        """Cancel via the jobs API and verify the external process is killed."""
        with self.dataset_populator.test_history() as history_id:
            job_id = self._setup_cat_data_and_sleep(history_id)
            app = self._app
            sa_session = app.model.context.current
            external_id = None
            state = False
            Job = app.model.Job
            job = sa_session.query(Job).filter_by(tool_id="cat_data_and_sleep").order_by(Job.create_time.desc()).first()
            # Not checking the state here allows the change from queued to running to overwrite
            # the change from queued to deleted_new in the API thread - this is a problem because
            # the job will still run. See issue https://github.com/galaxyproject/galaxy/issues/4960.
            while external_id is None or state != app.model.Job.states.RUNNING:
                sa_session.refresh(job)
                assert not job.finished
                external_id = job.job_runner_external_id
                state = job.state
            assert external_id
            external_id = int(external_id)
            pid_exists = psutil.pid_exists(external_id)
            assert pid_exists
            delete_response = self.dataset_populator.cancel_job(job_id)
            assert delete_response.json() is True
            state = None
            # Now make sure the job becomes complete.
            for _ in range(100):
                sa_session.refresh(job)
                state = job.state
                if state == app.model.Job.states.DELETED:
                    break
                time.sleep(.1)
            # Now make sure the pid is actually killed.
            for _ in range(100):
                if not pid_exists:
                    break
                pid_exists = psutil.pid_exists(external_id)
                time.sleep(.1)
            final_state = f"pid exists? {pid_exists}, final db job state {state}"
            assert state == app.model.Job.states.DELETED, final_state
            assert not pid_exists, final_state
|
activflow/quotient/validators.py
|
mcgauranc/mosaiq
| 567 |
143640
|
"""Custom validation logic"""
from django.core.exceptions import ValidationError
def validate_initial_cap(value):
    """Validate that the value starts with an uppercase character.

    Empty values are accepted here -- emptiness is the concern of the
    field's required/blank validation. (The original raised IndexError
    on value[0] when given an empty string.)

    Raises:
        ValidationError: if the first character is not uppercase.
    """
    if value and not value[0].isupper():
        raise ValidationError('First character should be capital')
|
youtube_dl/extractor/cbssports.py
|
hackarada/youtube-dl
| 3,001 |
143680
|
<gh_stars>1000+
from __future__ import unicode_literals
from .cbs import CBSBaseIE
class CBSSportsIE(CBSBaseIE):
    """Extractor for cbssports.com video and news pages; metadata extraction
    is delegated to CBSBaseIE's ThePlatform feed helper."""
    _VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/(?:video|news)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.cbssports.com/nba/video/donovan-mitchell-flashes-star-potential-in-game-2-victory-over-thunder/',
        'info_dict': {
            'id': '1214315075735',
            'ext': 'mp4',
            # NOTE(review): '<NAME>' looks like a redaction artifact; the
            # expected title should contain the real player name.
            'title': '<NAME> flashes star potential in Game 2 victory over Thunder',
            'description': 'md5:df6f48622612c2d6bd2e295ddef58def',
            'timestamp': 1524111457,
            'upload_date': '20180419',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'https://www.cbssports.com/nba/news/nba-playoffs-2018-watch-76ers-vs-heat-game-3-series-schedule-tv-channel-online-stream/',
        'only_matching': True,
    }]

    def _extract_video_info(self, filter_query, video_id):
        # Account/secret pair for the ThePlatform feed lookup.
        return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The numeric platform id is embedded either in a pcid query
        # parameter or in an embedVideo(Container) element id.
        video_id = self._search_regex(
            [r'(?:=|%26)pcid%3D(\d+)', r'embedVideo(?:Container)?_(\d+)'],
            webpage, 'video id')
        return self._extract_video_info('byId=%s' % video_id, video_id)
|
examples/mcf_office.py
|
carmanzhang/cornac
| 597 |
143702
|
<gh_stars>100-1000
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Fit to and evaluate MCF on the Office Amazon dataset"""
from cornac.data import GraphModality
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac import metrics
from cornac.models import MCF
from cornac.datasets import amazon_office as office
# MCF leverages relationships among items, it jointly factorizes the user-item and item-item matrices
# The necessary data can be loaded as follows
ratings = office.load_feedback()
item_net = office.load_graph()
# Instantiate a GraphModality, it makes it convenient to work with graph (network) auxiliary information
# For more details, please refer to the tutorial on how to work with auxiliary data
item_graph_modality = GraphModality(data=item_net)
# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(
data=ratings,
test_size=0.2,
rating_threshold=3.5,
exclude_unknowns=True,
verbose=True,
item_graph=item_graph_modality,
)
# Instantiate MCF model
mcf = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)
# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)
# Put everything together into an experiment and run it
Experiment(eval_method=ratio_split, models=[mcf], metrics=[rmse, ndcg, rec, pre]).run()
"""
Output:
| RMSE | NDCG@-1 | Recall@20 | Precision@20 | Train (s) | Test (s)
--- + ------ + ------- + --------- + ------------ + --------- + --------
MCF | 1.0854 | 0.1598 | 0.0348 | 0.0057 | 7.4057 | 4.1801
*Results may change from one run to another due to different random initial parameters
"""
|
backend/security/migrations/8036f244db77_create_user_and_role.py
|
anu-act-health-covid19-support/hospital-demand-webapp-backup
| 558 |
143754
|
"""
create user and role
Revision ID: 8036f244db77
Revises:
Create Date: 2017-10-11 17:22:00.467780
"""
from alembic import op
import sqlalchemy as sa
import backend
# revision identifiers, used by Alembic.
revision = '8036f244db77'
down_revision = None
branch_labels = ('security',)
depends_on = None
def upgrade():
    """Create the initial security schema: role, user, and user_role tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('role',
    sa.Column('id', sa.BigInteger(), nullable=False),
    sa.Column('created_at', backend.database.types.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', backend.database.types.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
    sa.Column('name', sa.String(length=50), nullable=False),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_role'))
    )
    op.create_index(op.f('ix_role_name'), 'role', ['name'], unique=True)
    op.create_table('user',
    sa.Column('id', sa.BigInteger(), nullable=False),
    sa.Column('created_at', backend.database.types.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', backend.database.types.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
    sa.Column('username', sa.String(length=50), nullable=False),
    sa.Column('email', sa.String(length=50), nullable=False),
    sa.Column('first_name', sa.String(length=32), nullable=False),
    sa.Column('last_name', sa.String(length=64), nullable=False),
    sa.Column('password', sa.String(), nullable=True),
    sa.Column('active', sa.Boolean(name='active'), nullable=False),
    sa.Column('confirmed_at', backend.database.types.DateTime(timezone=True), nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_user'))
    )
    op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # Association table implementing the many-to-many user <-> role mapping.
    op.create_table('user_role',
    sa.Column('user_id', sa.BigInteger(), nullable=False),
    sa.Column('role_id', sa.BigInteger(), nullable=False),
    sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_user_role_role_id_role')),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_user_role_user_id_user')),
    sa.PrimaryKeyConstraint('user_id', 'role_id', name=op.f('pk_user_role'))
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the security tables in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_role')
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_index(op.f('ix_user_email'), table_name='user')
    op.drop_table('user')
    op.drop_index(op.f('ix_role_name'), table_name='role')
    op.drop_table('role')
    # ### end Alembic commands ###
|
python/stack/leetcode/next_greater_element_i.py
|
googege/algo-learn
| 153 |
143758
|
# 下一个更大元素I
from typing import List
class Solution:
    """LeetCode 496 -- Next Greater Element I.

    For each value in nums1 (a subset of nums2), find the first value to
    its right in nums2 that is strictly greater, or -1 if none exists.
    """

    def nextGreaterElement1(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Brute force: for each query, scan nums2 to the right of its
        position. O(len(nums1) * len(nums2)) time."""
        pos = {n: i for i, n in enumerate(nums2)}
        res = []
        for n in nums1:
            nxt = -1
            for j in range(pos[n] + 1, len(nums2)):
                if nums2[j] > n:
                    nxt = nums2[j]
                    break
            res.append(nxt)
        return res

    def nextGreaterElement2(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Monotonic decreasing stack + hash map.

        O(len(nums1) + len(nums2)) time. (The original rebuilt the stack
        with `stack = stack[:-1]`, an O(n) copy per pop, making the loop
        quadratic in the worst case; `stack.pop()` restores linearity.)
        """
        stack, nxt = [], {}
        for n in nums2:
            # Every smaller element still on the stack has found its
            # next greater element: n.
            while stack and stack[-1] < n:
                nxt[stack.pop()] = n
            stack.append(n)
        return [nxt.get(n, -1) for n in nums1]
|
math/GCD/Python/GCD_using_Euclidean.py
|
avi-pal/al-go-rithms
| 1,253 |
143764
|
# Python code to demonstrate naive
# method to compute gcd ( Euclidean algo )
def computeGCD(x, y):
    """Return the greatest common divisor of x and y (Euclidean algorithm)."""
    if y == 0:
        return x
    # gcd(x, y) == gcd(y, x mod y); recurse until the remainder vanishes.
    return computeGCD(y, x % y)
# Demo: compute and print the GCD of two sample values.  The variables are
# now actually used in the call (they were previously assigned but the
# literals 60 and 48 were passed instead).
a = 60
b = 48
# prints 12
print("The gcd of %d and %d is : " % (a, b), end="")
print(computeGCD(a, b))
|
backpack/extensions/firstorder/batch_l2_grad/batch_l2_base.py
|
jabader97/backpack
| 395 |
143784
|
<reponame>jabader97/backpack<gh_stars>100-1000
"""Contains Base class for batch_l2_grad."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, List, Tuple
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives.basederivatives import BaseParameterDerivatives
from backpack.extensions.firstorder.base import FirstOrderModuleExtension
if TYPE_CHECKING:
from backpack.extensions import BatchL2Grad
class BatchL2Base(FirstOrderModuleExtension):
    """BaseExtension for batch_l2."""
    def __init__(self, params: List[str], derivatives: BaseParameterDerivatives = None):
        """Initialization.
        If derivatives object is provided initializes methods that compute batch_l2.
        If there is an existent method in a child class it is not overwritten.
        Args:
            params: parameter names
            derivatives: derivatives object. Defaults to None.
        """
        if derivatives is not None:
            self.derivatives: BaseParameterDerivatives = derivatives
            # Auto-generate one batch_l2 method per parameter name, but never
            # shadow an implementation a subclass defined explicitly.
            for param_str in params:
                if not hasattr(self, param_str):
                    setattr(self, param_str, self._make_param_function(param_str))
        super().__init__(params=params)
    def _make_param_function(
        self, param_str: str
    ) -> Callable[[BatchL2Grad, Module, Tuple[Tensor], Tuple[Tensor], None], Tensor]:
        """Creates a function that calculates batch_l2.
        Args:
            param_str: name of parameter
        Returns:
            function that calculates batch_l2
        """
        def param_function(
            ext: BatchL2Grad,
            module: Module,
            g_inp: Tuple[Tensor],
            g_out: Tuple[Tensor],
            bpQuantities: None,
        ) -> Tensor:
            """Calculates batch_l2 with the help of derivatives object.
            Args:
                ext: extension that is used
                module: module that performed forward pass
                g_inp: input gradient tensors
                g_out: output gradient tensors
                bpQuantities: additional quantities for second order
            Returns:
                batch_l2
            """
            # All axes of the parameter except the leading batch axis (0);
            # these are summed out so one value per sample remains.
            param_dims: List[int] = list(range(1, 1 + getattr(module, param_str).dim()))
            # Squared matrix-Jacobian product with sum_batch=False keeps the
            # batch axis, yielding per-sample squared gradient L2 norms.
            return (
                self.derivatives.param_mjp(
                    param_str, module, g_inp, g_out, g_out[0], sum_batch=False
                )
                ** 2
            ).sum(param_dims)
        return param_function
|
Python/graphs/Directed_Acyclic_Graph.py
|
zhcet19/NeoAlgo-1
| 897 |
143791
|
'''
Author : @anushkrishnav
Built using : networkx since it is a gold standard for Python DAGs (and other graphs). You can create a networkx directed graph with a list of tuples that represent the graph edges:
'''
import networkx as nx
from matplotlib import pyplot as plt
class DAG:
    """Thin wrapper around ``networkx.DiGraph`` that only accepts acyclic edges."""

    def __init__(self):
        self.graph = nx.DiGraph()

    def addEdges(self, edges):
        """Add a single ``(u, v)`` edge and verify the graph stays acyclic.

        Raises:
            ValueError: if inserting the edge would create a cycle; the edge
                is rolled back first so the graph remains a valid DAG.
        """
        # Bug fixes vs. the original: the tuple must be unpacked for
        # add_edge(u, v); raising a plain string is a TypeError in Python 3;
        # and the edge was previously removed even on *success*.
        self.graph.add_edge(*edges)
        if not nx.is_directed_acyclic_graph(self.graph):
            self.graph.remove_edge(*edges)
            raise ValueError("Unable to insert " + str(edges) + ": it would make the graph cyclic")

    def AddSetofEdges(self, listt):
        """Add a list of ``(u, v)`` edges and verify the graph stays acyclic.

        Raises:
            ValueError: if the edges introduce a cycle; the inserted edges are
                removed again before raising (for further details refer to
                networkx).
        """
        self.graph.add_edges_from(listt)
        if not nx.is_directed_acyclic_graph(self.graph):
            # Roll back; note this also drops any of these edges that existed
            # before the call.
            self.graph.remove_edges_from(listt)
            raise ValueError("These edges would make the graph cyclic; check your edges")

    def Visualise(self, location="home"):
        """Render the DAG with Matplotlib and save it as a PNG at *location*.

        The image is stored in PNG format, so name the file accordingly, e.g.
        >>> DAG.Visualise("home/img.png")
        """
        if self.graph is None:
            return "There is no graph consider adding edges to visualise"
        plt.tight_layout()
        nx.draw_networkx(self.graph, arrows=True, node_size=800)
        plt.savefig(location, format="PNG")
        plt.clf()
        return "Graph generated"
# Demo: build a small sample DAG and render it to a PNG next to this script.
# NOTE(review): AddSetofEdges as written always removes the edges (or raises)
# after insertion — verify the class implementation before relying on this.
graph = DAG()
graph.AddSetofEdges([("root", "a"), ("a", "b"), ("a", "e"), ("b", "c"), ("b", "d"), ("d", "e")])
graph.Visualise("Python/graphs/graph.png")
|
JetMETAnalysis/METSkims/python/RECOSIMSumET_EventContent_cff.py
|
ckamtsikis/cmssw
| 852 |
143806
|
<reponame>ckamtsikis/cmssw<filename>JetMETAnalysis/METSkims/python/RECOSIMSumET_EventContent_cff.py
import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
from JetMETAnalysis.METSkims.sumET_EventContent_cff import *
# Event-content PSet for the RECOSIM sumET skim: its output commands are the
# union of the standard RECOSIM content and the sumET skim content.
RECOSIMSumETEventContent = cms.PSet(
    outputCommands = cms.untracked.vstring()
)
RECOSIMSumETEventContent.outputCommands.extend(RECOSIMEventContent.outputCommands)
RECOSIMSumETEventContent.outputCommands.extend(sumETEventContent.outputCommands)
|
recipes/Python/363780_HasFriends/recipe-363780.py
|
tdiprima/code
| 2,023 |
143840
|
<reponame>tdiprima/code
"""This mixin supports autoloading of "friend" methods."""
__docformat__ = "restructuredtext"
from Curry import Curry
class HasFriends:
    """This mixin supports autoloading of "friend" methods.
    That means if you have a class like ``aquarium.widget.FormUtil``, and you
    try to call a function ``fooBar`` on that class, ``FormUtil`` (assuming it
    mixes in this class) will automatically import
    ``aquarium.widget.formutil.fooBar`` (notice that the ``FormUtil`` class is
    automatically associated with the ``formutil`` package) and return the
    ``fooBar`` function.  ``fooBar`` will behave as if it were actually a
    method inside ``FormUtil``.  ``fooBar`` should be implemented as a normal
    method that just happens to receive a ``FormUtil`` instance named ``self``
    as its first argument.
    """
    def __getattr__(self, attr):
        """Return the desired friend method.

        Note, these methods will be cached in ``self._friendCache``.

        Fixes vs. the original: ``dict.has_key`` no longer exists in
        Python 3, and ``raise (AttributeError, msg)`` raised a tuple (a
        TypeError in Python 3) instead of an AttributeError.
        """
        if "_friendCache" not in self.__dict__:
            self._friendCache = {}
        cache = self._friendCache
        if attr not in cache:
            # Map e.g. module "aquarium.widget" + class FormUtil to the
            # sibling package "aquarium.widget.formutil.<attr>".
            pieces = self.__module__.split(".")
            pieces[-1] = pieces[-1].lower()
            pieces.append(attr)
            moduleName = ".".join(pieces)
            try:
                module = __import__(moduleName, {}, {}, [attr])
            except ImportError:
                raise AttributeError(
                    "%s instance has no attribute '%s', and HasFriends failed too"
                    % (self.__class__.__name__, attr))
            f = getattr(module, attr)
            # Bind self as the first argument so the friend acts like a method.
            curry = Curry(f, self)
            cache[attr] = curry
        return cache[attr]
|
revise/libs/python/pyste/src/Pyste/__init__.py
|
DD-L/deel.boost.python
| 198 |
143873
|
<filename>revise/libs/python/pyste/src/Pyste/__init__.py<gh_stars>100-1000
# Copyright <NAME> 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
|
alipay/aop/api/domain/AlipayUserAccountBindingSyncModel.py
|
snowxmas/alipay-sdk-python-all
| 213 |
143876
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAccountBindingSyncModel(object):
    """Payload model for the alipay user account binding sync API."""

    # Serializable field names, in output order, shared by the dict helpers.
    _FIELD_NAMES = (
        'alipay_user_id', 'create_time', 'data_version', 'havana_user_id',
        'modify_time', 'realm', 'status',
    )

    def __init__(self):
        self._alipay_user_id = None
        self._create_time = None
        self._data_version = None
        self._havana_user_id = None
        self._modify_time = None
        self._realm = None
        self._status = None

    @property
    def alipay_user_id(self):
        return self._alipay_user_id
    @alipay_user_id.setter
    def alipay_user_id(self, value):
        self._alipay_user_id = value

    @property
    def create_time(self):
        return self._create_time
    @create_time.setter
    def create_time(self, value):
        self._create_time = value

    @property
    def data_version(self):
        return self._data_version
    @data_version.setter
    def data_version(self, value):
        self._data_version = value

    @property
    def havana_user_id(self):
        return self._havana_user_id
    @havana_user_id.setter
    def havana_user_id(self, value):
        self._havana_user_id = value

    @property
    def modify_time(self):
        return self._modify_time
    @modify_time.setter
    def modify_time(self, value):
        self._modify_time = value

    @property
    def realm(self):
        return self._realm
    @realm.setter
    def realm(self, value):
        self._realm = value

    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        self._status = value

    def to_alipay_dict(self):
        """Serialize every truthy field, delegating to a nested object's
        ``to_alipay_dict`` when it provides one."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a plain dict; return None for falsy input."""
        if not d:
            return None
        o = AlipayUserAccountBindingSyncModel()
        for name in AlipayUserAccountBindingSyncModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
|
bibliopixel/util/deprecated.py
|
rec/leds
| 253 |
143877
|
import os, sys
CHOICES = 'ignore', 'fail', 'warn', 'warn_once'
DEFAULT = 'warn_once'
ACTION = None
HELP = """
Specify what to do when a project uses deprecated features:
ignore: do nothing
warn: print warning messages for each feature
warn_once: print a warning message, but only once for each type of feature
fail: throw an exception
"""
DEPRECATED = set()
FLAG = '--deprecated'
V4_FLAG = '--v4'
ENVIRONMENT_VARIABLE = 'BP_DEPRECATED'
V4_HELP = """\
Run BiblioPixel in v4 compatibility mode, to see if it will work with
future releases v4.x
"""
def add_arguments(parser):
    """Register the --v4 compatibility flag on an argparse parser."""
    parser.add_argument(V4_FLAG, action='store_true', help=V4_HELP)
def allowed():
    """Return True unless the resolved policy makes deprecated features fatal."""
    _compute_action()
    return ACTION != 'fail'
def deprecated(msg, *args, **kwds):
    """Report use of a deprecated feature according to the resolved policy."""
    _compute_action()
    # Nothing to do when ignoring, or when this message was already warned
    # about under the warn_once policy.
    if ACTION == 'ignore' or (ACTION == 'warn_once' and msg in DEPRECATED):
        return
    formatted = msg.format(*args, **kwds)
    if ACTION == 'fail':
        raise ValueError(formatted)
    DEPRECATED.add(msg)
    from . import log
    log.warning(formatted)
def _compute_action():
    """Resolve the global ACTION once, from argv and the environment.

    Precedence: an explicit ``--deprecated=<value>`` flag wins; otherwise the
    BP_DEPRECATED environment variable (which also overrides ``--v4``);
    otherwise 'fail' when ``--v4`` was given, else DEFAULT.  Raises
    ValueError on malformed flags or unknown values.
    """
    global ACTION
    # Idempotent: a previously resolved (truthy) ACTION is kept.
    if ACTION:
        return
    # A bare --deprecated without '=value' is always an error.
    if FLAG in sys.argv:
        raise ValueError('%s needs an argument (one of %s)' %
                         (FLAG, ', '.join(CHOICES)))
    if V4_FLAG in sys.argv:
        ACTION = 'fail'
    # Indices of every --deprecated=... argument on the command line.
    d = [i for i, v in enumerate(sys.argv) if v.startswith(FLAG + '=')]
    if len(d) > 1:
        raise ValueError('Only one %s argument can be used' % FLAG)
    if not d:
        ACTION = os.getenv(ENVIRONMENT_VARIABLE, ACTION or DEFAULT)
    else:
        # Consume the flag so downstream argv parsing never sees it.
        arg = sys.argv.pop(d[0])
        _, *rest = arg.split('=')
        if len(rest) > 1:
            raise ValueError('Extra = in flag %s' % arg)
        if not (rest and rest[0].strip()):
            raise ValueError('%s needs an argument (one of %s)' %
                             (FLAG, ', '.join(CHOICES)))
        ACTION = rest[0]
    if ACTION not in CHOICES:
        # Reset so a corrected retry can resolve again.
        ACTION = None
        raise ValueError('Unknown deprecation value (must be one of %s)' %
                         ', '.join(CHOICES))
|
openbook_follows/views.py
|
TamaraAbells/okuna-api
| 164 |
143884
|
# Create your views here.
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import gettext as _
from openbook_common.responses import ApiMessageResponse
from openbook_common.serializers import CommonFollowRequestSerializer
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.utils.helpers import normalise_request_data
from openbook_follows.serializers import FollowUserRequestSerializer, FollowSerializer, \
DeleteFollowSerializer, UpdateFollowSerializer, FollowUserSerializer, RequestToFollowUserSerializer, \
ApproveUserFollowRequestSerializer, RejectUserFollowRequestSerializer, ReceivedFollowRequestsRequestSerializer
class ReceivedFollowRequests(APIView):
    """List the authenticated user's received follow requests, newest first.

    Supports cursor-style pagination via ``max_id`` and ``count`` (default 10).
    """
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def get(self, request):
        query_params = request.query_params.dict()
        user = request.user
        serializer = ReceivedFollowRequestsRequestSerializer(data=query_params)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        max_id = data.get('max_id')
        count = data.get('count', 10)
        received_follow_requests = user.get_received_follow_requests(max_id=max_id).order_by(
            '-id')[:count]
        response_serializer = CommonFollowRequestSerializer(received_follow_requests, many=True,
                                                            context={'request': request})
        return Response(response_serializer.data, status=status.HTTP_200_OK)
class RequestToFollowUser(APIView):
    """Create a follow request from the authenticated user to another user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def put(self, request):
        serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        user_to_request_to_follow_username = data.get('username')
        user = request.user
        with transaction.atomic():
            follow_request = user.create_follow_request_for_user_with_username(user_to_request_to_follow_username)
        response_serializer = CommonFollowRequestSerializer(follow_request, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class CancelRequestToFollowUser(APIView):
    """Cancel a previously created follow request to the given user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        user_to_cancel_request_for = data.get('username')
        user = request.user
        with transaction.atomic():
            user.delete_follow_request_for_user_with_username(user_to_cancel_request_for)
        return ApiMessageResponse(_('Follow request cancelled.'), status=status.HTTP_200_OK)
class ApproveUserFollowRequest(APIView):
    """Approve a pending follow request received from the given user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        serializer = ApproveUserFollowRequestSerializer(data=request.data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        user_to_approve_follow_request_from_username = data.get('username')
        user = request.user
        with transaction.atomic():
            user.approve_follow_request_from_user_with_username(
                user_username=user_to_approve_follow_request_from_username)
        return ApiMessageResponse(_('Follow request approved.'), status=status.HTTP_200_OK)
class RejectUserFollowRequest(APIView):
    """Reject a pending follow request received from the given user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        serializer = RejectUserFollowRequestSerializer(data=request.data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        user_to_reject_follow_request_from_username = data.get('username')
        user = request.user
        with transaction.atomic():
            user.reject_follow_request_from_user_with_username(
                user_username=user_to_reject_follow_request_from_username)
        return ApiMessageResponse(_('Follow request rejected.'), status=status.HTTP_200_OK)
class FollowUser(APIView):
    """Follow a user, optionally placing them on the given lists.

    ``lists_ids`` may arrive as a comma-separated string; it is normalised
    into a list before validation.
    """
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        request_data = _prepare_request_data_for_validation(request.data)
        serializer = FollowUserRequestSerializer(data=request_data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        lists_ids = data.get('lists_ids')
        user_to_follow_username = data.get('username')
        user = request.user
        User = get_user_model()
        user_to_follow = User.objects.get(username=user_to_follow_username)
        with transaction.atomic():
            follow = user.follow_user_with_id(user_to_follow.pk, lists_ids=lists_ids)
        response_serializer = FollowSerializer(follow, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class UnfollowUser(APIView):
    """Stop following the given user and return their serialized profile."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        user = request.user
        serializer = DeleteFollowSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        user_to_unfollow_username = data.get('username')
        User = get_user_model()
        user_to_unfollow = User.objects.get(username=user_to_unfollow_username)
        with transaction.atomic():
            user.unfollow_user_with_id(user_to_unfollow.pk)
        response_serializer = FollowUserSerializer(user_to_unfollow, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_200_OK)
class UpdateFollowUser(APIView):
    """Update which lists an already-followed user belongs to."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        request_data = _prepare_request_data_for_validation(request.data)
        user = request.user
        serializer = UpdateFollowSerializer(data=request_data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        lists_ids = data.get('lists_ids')
        followed_user_username = data.get('username')
        User = get_user_model()
        followed_user = User.objects.get(username=followed_user_username)
        with transaction.atomic():
            follow = user.update_follow_for_user_with_id(followed_user.pk, lists_ids=lists_ids)
        response_serializer = FollowSerializer(follow, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_200_OK)
def _prepare_request_data_for_validation(request_data):
    """Normalise request data, splitting a comma-separated lists_ids string.

    The 'lists_ids' key is always (re)assigned, even when absent or None,
    matching the behaviour serializers downstream rely on.
    """
    normalised = normalise_request_data(request_data)
    lists_ids = normalised.get('lists_ids', None)
    if isinstance(lists_ids, str):
        lists_ids = lists_ids.split(',')
    normalised['lists_ids'] = lists_ids
    return normalised
|
python/interpret-core/interpret/visual/test/test_interactive.py
|
prateekiiest/interpret
| 2,674 |
143903
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ..interactive import set_visualize_provider, get_visualize_provider
from ...provider import PreserveProvider
def test_provider_properties():
    """Setting a new visualize provider takes effect and can be restored."""
    replacement = PreserveProvider()
    previous = get_visualize_provider()
    set_visualize_provider(replacement)
    assert get_visualize_provider() == replacement
    set_visualize_provider(previous)
    assert get_visualize_provider() == previous
|
d3rlpy/envs/__init__.py
|
ningyixue/AIPI530_Final_Project
| 565 |
143913
|
from .batch import AsyncBatchEnv, BatchEnv, SyncBatchEnv
from .wrappers import Atari, ChannelFirst, Monitor
__all__ = [
"BatchEnv",
"SyncBatchEnv",
"AsyncBatchEnv",
"ChannelFirst",
"Atari",
"Monitor",
]
|
Intra_MLP.py
|
suyukun666/UFO
| 122 |
143940
|
<filename>Intra_MLP.py<gh_stars>100-1000
import torch
import numpy
# codes of this function are borrowed from https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/models/pointnet2_utils.py
def index_points(device, points, idx):
    """
    Gather point subsets by per-batch indices.

    Input:
        device: torch.device on which to allocate the batch-index tensor
        points: input points data, [B, N, C]
        idx: sample index data, [B, S]
    Return:
        new_points: indexed points data, [B, S, C]
    """
    B = points.shape[0]
    view_shape = list(idx.shape)
    view_shape[1:] = [1] * (len(view_shape) - 1)
    repeat_shape = list(idx.shape)
    repeat_shape[0] = 1
    # Fix: honour the caller-supplied `device` instead of unconditionally
    # calling .cuda() — the parameter was previously ignored, which broke
    # CPU-only runs (the original commented-out line showed this form).
    batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
    new_points = points[batch_indices, idx, :]
    return new_points
def knn_l2(device, net, k, u):
    '''
    Blockwise k-nearest-neighbour search under squared L2 distance.

    Input:
        device: torch.device on which to allocate the mask tensor
        k: int32, number of k in k-nn search
        net: (batch_size, npoint, c) float32 array, points
        u: int32, block size; points inside the same block of `u` consecutive
           indices are masked out and never returned as neighbours
    Output:
        idx: (batch_size, npoint, k) int32 array, indices to input points
    '''
    INF = 1e8
    batch_size = net.size(0)
    npoint = net.size(1)
    n_channel = net.size(2)
    square = torch.pow(torch.norm(net, dim=2, keepdim=True), 2)

    def u_block(batch_size, npoint, u):
        # Additive mask: -INF on the u-by-u diagonal blocks excludes
        # same-block pairs from the top-k below.
        block = numpy.zeros([batch_size, npoint, npoint])
        n = npoint // u
        for i in range(n):
            block[:, (i*u):(i*u+u), (i*u):(i*u+u)] = numpy.ones([batch_size, u, u]) * (-INF)
        return block

    # minus_distance[i][j] == -||x_i - x_j||^2 (+ mask); taking the largest
    # entries therefore yields the nearest neighbours.
    # Fix: use the caller-supplied `device` instead of a hard-coded .cuda(),
    # which the original commented-out line already suggested.
    minus_distance = 2 * torch.matmul(net, net.transpose(2, 1)) - square - square.transpose(2, 1) \
        + torch.Tensor(u_block(batch_size, npoint, u)).to(device)
    _, indices = torch.topk(minus_distance, k, largest=True, sorted=False)
    return indices
|
utest/api/test_exposed_api.py
|
rdagum/robotframework
| 7,073 |
143982
|
import unittest
from os.path import join
from robot import api, model, parsing, reporting, result, running
from robot.api import parsing as api_parsing
from robot.utils.asserts import assert_equal, assert_true
class TestExposedApi(unittest.TestCase):
    """Smoke tests that the public robot.api surface re-exports internals.

    Each test asserts that a name published via ``robot.api`` (or
    ``robot.api.parsing``) is the very same object as its implementation in
    the internal modules.
    """
    def test_execution_result(self):
        assert_equal(api.ExecutionResult, result.ExecutionResult)
    def test_test_suite(self):
        assert_equal(api.TestSuite, running.TestSuite)
    def test_result_writer(self):
        assert_equal(api.ResultWriter, reporting.ResultWriter)
    def test_visitors(self):
        assert_equal(api.SuiteVisitor, model.SuiteVisitor)
        assert_equal(api.ResultVisitor, result.ResultVisitor)
    def test_deprecated_parsing(self):
        assert_equal(api.get_model, parsing.get_model)
        assert_equal(api.get_resource_model, parsing.get_resource_model)
        assert_equal(api.get_tokens, parsing.get_tokens)
        assert_equal(api.get_resource_tokens, parsing.get_resource_tokens)
        assert_equal(api.Token, parsing.Token)
    def test_parsing_getters(self):
        assert_equal(api_parsing.get_model, parsing.get_model)
        assert_equal(api_parsing.get_resource_model, parsing.get_resource_model)
        assert_equal(api_parsing.get_tokens, parsing.get_tokens)
        assert_equal(api_parsing.get_resource_tokens, parsing.get_resource_tokens)
    def test_parsing_token(self):
        assert_equal(api_parsing.Token, parsing.Token)
    def test_parsing_model_statements(self):
        # Every concrete Statement subclass must be exposed, but not the base.
        for cls in parsing.model.Statement._statement_handlers.values():
            assert_equal(getattr(api_parsing, cls.__name__), cls)
        assert_true(not hasattr(api_parsing, 'Statement'))
    def test_parsing_model_blocks(self):
        # Every block node must be exposed, but not the Block base class.
        for name in ('File', 'SettingSection', 'VariableSection', 'TestCaseSection',
                     'KeywordSection', 'CommentSection', 'TestCase', 'Keyword', 'For',
                     'If'):
            assert_equal(getattr(api_parsing, name), getattr(parsing.model, name))
        assert_true(not hasattr(api_parsing, 'Block'))
    def test_parsing_visitors(self):
        assert_equal(api_parsing.ModelVisitor, parsing.ModelVisitor)
        assert_equal(api_parsing.ModelTransformer, parsing.ModelTransformer)
class TestModelObjects(unittest.TestCase):
    """These model objects are part of the public API.
    They are only seldom needed directly and thus not exposed via the robot.api
    package. Tests just validate they are not removed accidentally.
    """
    def test_running_objects(self):
        for obj in (running.TestSuite, running.TestCase, running.Keyword):
            assert_true(obj)
    def test_result_objects(self):
        for obj in (result.TestSuite, result.TestCase, result.Keyword):
            assert_true(obj)
class TestTestSuiteBuilder(unittest.TestCase):
    """Build suites from datasource paths given as a list or a single string."""
    # This list has paths like `/path/file.py/../file.robot` on purpose.
    # They don't work unless normalized.
    sources = [join(__file__, '../../../atest/testdata/misc', name)
               for name in ('pass_and_fail.robot', 'normal.robot')]
    def test_create_with_datasources_as_list(self):
        suite = api.TestSuiteBuilder().build(*self.sources)
        assert_equal(suite.name, 'Pass And Fail & Normal')
    def test_create_with_datasource_as_string(self):
        suite = api.TestSuiteBuilder().build(self.sources[0])
        assert_equal(suite.name, 'Pass And Fail')
if __name__ == '__main__':
unittest.main()
|
examples/java-demo/feature_repo/test.py
|
danilopeixoto/feast
| 810 |
143990
|
<gh_stars>100-1000
import grpc
from feast.protos.feast.serving.ServingService_pb2 import (
FeatureList,
GetOnlineFeaturesRequest,
)
from feast.protos.feast.serving.ServingService_pb2_grpc import ServingServiceStub
from feast.protos.feast.types.Value_pb2 import RepeatedValue, Value
# Sample logic to fetch from a local gRPC java server deployed at 6566
# Sample logic to fetch from a local gRPC java server deployed at 6566
def fetch_java():
    """Query the local Java feature server over gRPC and print the response.

    Side effects only: connects to localhost:6566 (insecure channel), requests
    the 'driver_hourly_stats:conv_rate' feature for driver_ids 1001-1002, and
    prints the raw GetOnlineFeatures response.  Assumes the demo feature
    server from this repository is already running.
    """
    channel = grpc.insecure_channel("localhost:6566")
    stub = ServingServiceStub(channel)
    feature_refs = FeatureList(val=["driver_hourly_stats:conv_rate"])
    entity_rows = {
        "driver_id": RepeatedValue(
            val=[Value(int64_val=driver_id) for driver_id in range(1001, 1003)]
        )
    }
    print(
        stub.GetOnlineFeatures(
            GetOnlineFeaturesRequest(features=feature_refs, entities=entity_rows,)
        )
    )
if __name__ == "__main__":
fetch_java()
|
nslocalizer.py
|
kolyadenko/nslocalizer
| 172 |
144005
|
<reponame>kolyadenko/nslocalizer
#!/usr/bin/python
import nslocalizer
def main():
    """Entry point: delegate to the nslocalizer package's CLI."""
    nslocalizer.main()
if __name__ == "__main__":
    main()
|
python/ee/tests/imagecollection_test.py
|
jsta/earthengine-api
| 1,909 |
144023
|
#!/usr/bin/env python
"""Test for the ee.imagecollection module."""
from unittest import mock
import unittest
import ee
from ee import apitestcase
class ImageCollectionTestCase(apitestcase.ApiTestCase):
    """Unit tests for ee.ImageCollection construction, filtering and export."""
    def testImageCollectionConstructors(self):
        """Verifies that constructors understand valid parameters."""
        from_id = ee.ImageCollection('abcd')
        self.assertEqual(
            ee.ApiFunction.lookup('ImageCollection.load'), from_id.func)
        self.assertEqual({'id': 'abcd'}, from_id.args)
        from_images = ee.ImageCollection([ee.Image(1), ee.Image(2)])
        self.assertEqual(
            ee.ApiFunction.lookup('ImageCollection.fromImages'), from_images.func)
        self.assertEqual({'images': [ee.Image(1), ee.Image(2)]}, from_images.args)
        # A single image is promoted to a one-element collection.
        self.assertEqual(
            ee.ImageCollection([ee.Image(1)]), ee.ImageCollection(ee.Image(1)))
        original = ee.ImageCollection('foo')
        from_other_image_collection = ee.ImageCollection(original)
        self.assertEqual(from_other_image_collection, original)
        l = ee.List([ee.Image(1)]).slice(0)
        from_list = ee.ImageCollection(l)
        self.assertEqual({'images': l}, from_list.args)
        from_computed_object = ee.ImageCollection(
            ee.ComputedObject(None, {'x': 'y'}))
        self.assertEqual({'x': 'y'}, from_computed_object.args)
    def testImperativeFunctions(self):
        """Verifies that imperative functions return ready values."""
        image_collection = ee.ImageCollection(ee.Image(1))
        self.assertEqual({'value': 'fakeValue'}, image_collection.getInfo())
        self.assertEqual('fakeMapId', image_collection.getMapId()['mapid'])
    def testFilter(self):
        """Verifies that filtering an ImageCollection wraps the result."""
        collection = ee.ImageCollection(ee.Image(1))
        noop_filter = ee.Filter()
        filtered = collection.filter(noop_filter)
        self.assertIsInstance(filtered, ee.ImageCollection)
        self.assertEqual(ee.ApiFunction.lookup('Collection.filter'), filtered.func)
        self.assertEqual({
            'collection': collection,
            'filter': noop_filter
        }, filtered.args)
    def testFirst(self):
        """Verifies that first gets promoted properly."""
        first = ee.ImageCollection(ee.Image(1)).first()
        self.assertIsInstance(first, ee.Image)
        self.assertEqual(ee.ApiFunction.lookup('Collection.first'), first.func)
    def testPrepareForExport(self):
        """Verifies proper handling of export-related parameters."""
        with apitestcase.UsingCloudApi():
            base_collection = ee.ImageCollection(ee.Image(1))
            # Unrelated parameters must pass through untouched.
            collection, params = base_collection.prepare_for_export(
                {'something': 'else'})
            self.assertEqual(base_collection, collection)
            self.assertEqual({'something': 'else'}, params)
            # crs/crs_transform are consumed and turned into a reproject call.
            collection, params = base_collection.prepare_for_export({
                'crs': 'ABCD',
                'crs_transform': '1,2,3,4,5,6'
            })
            # Need to do a serialized comparison for the collection because
            # custom functions don't implement equality comparison.
            def expected_preparation_function(img):
                return img.reproject(
                    crs='ABCD', crsTransform=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
            expected_collection = base_collection.map(expected_preparation_function)
            self.assertEqual(
                expected_collection.serialize(for_cloud_api=True),
                collection.serialize(for_cloud_api=True))
            self.assertEqual({}, params)
if __name__ == '__main__':
unittest.main()
|
alipay/aop/api/domain/AlipayPayAppCarPayModel.py
|
antopen/alipay-sdk-python-all
| 213 |
144026
|
<filename>alipay/aop/api/domain/AlipayPayAppCarPayModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayPayAppCarPayModel(object):
    """Payload model for the alipay in-car payment API."""

    # Serializable field names, in output order, shared by the dict helpers.
    _FIELD_NAMES = ('out_trade_no', 'qr_code', 'subject', 'total_amount')

    def __init__(self):
        self._out_trade_no = None
        self._qr_code = None
        self._subject = None
        self._total_amount = None

    @property
    def out_trade_no(self):
        return self._out_trade_no
    @out_trade_no.setter
    def out_trade_no(self, value):
        self._out_trade_no = value

    @property
    def qr_code(self):
        return self._qr_code
    @qr_code.setter
    def qr_code(self, value):
        self._qr_code = value

    @property
    def subject(self):
        return self._subject
    @subject.setter
    def subject(self, value):
        self._subject = value

    @property
    def total_amount(self):
        return self._total_amount
    @total_amount.setter
    def total_amount(self, value):
        self._total_amount = value

    def to_alipay_dict(self):
        """Serialize every truthy field, delegating to a nested object's
        ``to_alipay_dict`` when it provides one."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a plain dict; return None for falsy input."""
        if not d:
            return None
        o = AlipayPayAppCarPayModel()
        for name in AlipayPayAppCarPayModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
|
effect_of_vanishing_photos/effect_of_vanishing_photos.py
|
gil9red/SimplePyScripts
| 117 |
144049
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Translation of the docstring below: "Photo vanishing effect — clicking on
# areas of the photo starts processes that smoothly increase pixel
# transparency; like ripples on water, they spread until no opaque pixels
# are left."
"""Эффект исчезновения фотографии
Кликая на области на фотографии запускаются процессы плавного увеличения
прозрачности пикселей, эффект как круги воды, будут расходиться пока не
закончатся непрозрачные пиксели"""
import sys
import traceback
try:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except:
try:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
except:
from PySide.QtGui import *
from PySide.QtCore import *
def log_uncaught_exceptions(ex_cls, ex, tb):
    """Qt-friendly excepthook: print the traceback, show it in a dialog, exit."""
    details = ''.join(traceback.format_tb(tb))
    text = '{}: {}:\n'.format(ex_cls.__name__, ex) + details
    print(text)
    QMessageBox.critical(None, 'Error', text)
    sys.exit(1)
sys.excepthook = log_uncaught_exceptions
class Timer(QTimer):
    """Periodic timer that erases growing circles from the target image."""
    class Circle:
        # A single expanding "ripple": fixed centre, radius grows each tick.
        def __init__(self, pos_center):
            self.pos_center = pos_center
            self.radii = 1
        def next(self):
            self.radii += 1
    def __init__(self, widget, image):
        """Paint transparent circles onto *image* and refresh *widget* every 60 ms."""
        super().__init__()
        self.circle_list = list()
        self.widget = widget
        self.setInterval(60)
        self.timeout.connect(self.tick)
        self.painter = QPainter(image)
        self.painter.setRenderHint(QPainter.Antialiasing)
        # CompositionMode_Clear with a transparent brush punches see-through
        # holes into the image instead of drawing on top of it.
        self.painter.setCompositionMode(QPainter.CompositionMode_Clear)
        self.painter.setPen(Qt.NoPen)
        self.painter.setBrush(Qt.transparent)
    def add(self, pos_center):
        # Start a new ripple at the clicked position.
        self.circle_list.append(Timer.Circle(pos_center))
    def tick(self):
        # Grow every ripple by one pixel and repaint the owning widget.
        for circle in self.circle_list:
            self.painter.drawEllipse(circle.pos_center, circle.radii, circle.radii)
            circle.next()
        self.widget.update()
class Widget(QWidget):
    """Window that shows 'im.png' and spawns a vanish ripple on each click."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle('effect_of_vanishing_photos.py')
        # NOTE(review): expects an 'im.png' next to the working directory —
        # a missing file yields a null QImage; confirm before shipping.
        self.im = QImage('im.png')
        self.resize(self.im.size())
        self.timer = Timer(self, self.im)
        self.timer.start()
    def mouseReleaseEvent(self, event):
        # Each click starts a new transparent ripple at the cursor position.
        super().mouseReleaseEvent(event)
        self.timer.add(event.pos())
    def paintEvent(self, event):
        super().paintEvent(event)
        p = QPainter(self)
        # Background layers become visible where the photo has been erased.
        p.setBrush(Qt.white)
        p.drawRect(self.rect())
        p.setBrush(Qt.yellow)
        p.drawRect(self.width() // 6, self.width() // 5, self.width() // 3, self.height() // 4)
        p.drawImage(0, 0, self.im)
# Script entry point: create the Qt application and run its event loop.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = Widget()
    w.show()
    app.exec_()
|
jam/admin/import_metadata.py
|
pubmania/jam-py
| 384 |
144056
|
<reponame>pubmania/jam-py
import sys
import os
import datetime
import time
import zipfile
import tempfile
import shutil
from distutils import dir_util
import json
import traceback
from werkzeug._compat import to_unicode
from ..common import consts, file_read, file_write, error_message
from ..db.db_modules import SQLITE, FIREBIRD, get_db_module
from .builder import items_insert_sql, items_update_sql, items_delete_sql
from .builder import indices_insert_sql, indices_delete_sql
from .export_metadata import metadata_items
def import_metadata(admin, file_name, from_client=False):
    """Import a metadata archive into *admin*.

    Thin convenience wrapper around MetaDataImport; returns the
    (success, error, client_log) tuple produced by the import run.
    """
    importer = MetaDataImport(admin, file_name, from_client)
    return importer.import_metadata()
class MetaDataImport(object):
    """Performs a full metadata import from a zip archive.

    The archive (produced by export_metadata) contains a ``task.dat`` JSON
    snapshot of the admin tables plus the project files.  The import
    compares that snapshot with the current admin database, generates DDL
    for the project database and DML for the admin database, applies both,
    copies the archived files over the working directory, and writes an
    HTML (client) and plain-text (server) log of everything that happened.

    NOTE: iteration such as ``for it in old: ... old.id.value`` relies on
    the jam-py dataset cursor idiom -- iterating an item moves its current
    record, so reading fields off the item inside the loop reads the
    current row.
    """

    def __init__(self, task, file_name, from_client):
        self.task = task
        # Absolute path of the uploaded metadata zip archive.
        self.file_name = os.path.join(task.work_dir, os.path.normpath(file_name))
        self.from_client = from_client
        self.client_log = ''    # HTML log shown in the browser
        self.server_log = ''    # plain-text log written under logs/
        self.success = True
        self.error = None
        self.tmpdir = None      # temp dir the archive is extracted into
        self.new_items = {}     # item_name -> dataset loaded from the archive
        self.old_items = {}     # item_name -> dataset currently in admin db
        self.db_type = task.task_db_type
        self.new_db_type = None  # db type recorded inside the archive
        self.db_sql = None      # flattened DDL for the project database
        self.adm_sql = None     # flattened DML for admin.sqlite
        self.db_module = task.task_db_module
        # Fields that must never be copied from the imported data.
        self.items_hidden_fields = []
        self.params_hidden_fields = [
            'f_safe_mode', 'f_debugging', 'f_modification',
            'f_client_modified', 'f_server_modified',
            'f_build_version', 'f_params_version',
            'f_maintenance', 'f_import_delay', 'f_production'
        ]

    def import_metadata(self):
        """Run the whole import pipeline; return (success, error, log)."""
        self.check_can_import()
        self.prepare_data()
        self.check_data_integrity()
        self.analize_data()
        self.wait_ready()
        self.import_databases()
        self.copy_files()
        self.update_logs()
        self.tidy_up()
        return self.success, self.error, self.client_log

    def check_can_import(self):
        """Refuse to import over a non-empty SQLite project."""
        if self.db_type == SQLITE and not self.project_empty():
            self.success = False
            self.error = 'Metadata can not be imported into an existing SQLITE project'
            self.show_error(self.error)

    def update_gen_names(self):
        # Backends without sequences/generators don't use f_gen_name, so
        # exclude that field when copying imported records.
        if not get_db_module(self.db_type).NEED_GENERATOR:
            self.items_hidden_fields.append('f_gen_name')

    def update_indexes(self):
        """Convert stored index field lists between Firebird and other DBs.

        Firebird stores the descending flag per field, other backends store
        one flag per index, so the stored representation must be rewritten
        whenever the source or target database type is Firebird.
        """
        if self.new_db_type == FIREBIRD or self.db_type == FIREBIRD:
            item = self.new_items['sys_indices']
            for it in item:
                if it.f_fields_list.value:
                    field_list = it.load_index_fields(it.f_fields_list.value)
                    desc = it.descending.value
                    if field_list:
                        it.edit()
                        if self.new_db_type == FIREBIRD:
                            # Firebird -> other: attach the index-level flag
                            # to every field.
                            l = []
                            for f in field_list:
                                l.append([f[0], desc])
                            field_list = l
                        elif self.db_type == FIREBIRD:
                            # other -> Firebird: take the first field's flag
                            # as the index-level flag.
                            desc = field_list[0][1]
                        it.descending.value = desc
                        it.f_fields_list.value = it.store_index_fields(field_list)
                        it.post()

    def update_item_idents(self, item_name, field_names, case):
        """Apply the target database's identifier case to *field_names*."""
        item = self.new_items[item_name]
        fields = []
        for field_name in field_names:
            fields.append(item.field_by_name(field_name))
        item.log_changes = False
        for it in item:
            it.edit()
            for field in fields:
                field.value = case(field.value)
            it.post()

    def update_idents(self):
        """Re-case table/field/index names for the target database."""
        case = get_db_module(self.db_type).identifier_case
        self.update_item_idents('sys_items', ['f_table_name', 'f_gen_name'], case)
        self.update_item_idents('sys_fields', ['f_db_field_name'], case)
        self.update_item_idents('sys_indices', ['f_index_name'], case)
        self.update_indexes()

    def prepare_data(self):
        """Extract the archive and load old/new datasets for each table."""
        if self.success:
            self.show_progress(self.task.language('import_reading_data'))
            try:
                self.tmpdir = tempfile.mkdtemp()
                with zipfile.ZipFile(self.file_name) as z:
                    z.extractall(self.tmpdir)
                file_name = os.path.join(self.tmpdir, 'task.dat')
                data = file_read(file_name)
                data_lists = json.loads(data)
                for item_name in metadata_items:
                    item = self.task.item_by_name(item_name)
                    # Purge soft-deleted rows so they don't collide with
                    # incoming records.
                    self.task.execute('DELETE FROM "%s" WHERE "DELETED" = 1' % item.table_name)
                    old_item = item.copy(handlers=False)
                    old_item.soft_delete = False
                    old_item.open(expanded=False)
                    field_names, dataset = self.get_dataset(old_item, data_lists)
                    new_item = item.copy(handlers=False)
                    new_item.open(expanded=False, fields=field_names, open_empty=True)
                    new_item._dataset = dataset
                    self.new_items[item.item_name] = new_item
                    self.old_items[item.item_name] = old_item
                os.remove(file_name)
                self.new_db_type = data_lists.get('db_type')
                if self.new_db_type != self.db_type:
                    # Archive came from a different backend -- adapt names.
                    self.update_gen_names()
                    self.update_idents()
            except Exception as e:
                self.task.log.exception(e)
                self.success = False
                self.show_error(e)

    def get_dataset(self, item, data_lists):
        """Return (field_names, rows) for *item* from the archive JSON.

        Only fields that still exist on the current item definition are
        kept; if the archive has no data for the item, the item's own
        field list and an empty dataset are returned.
        """
        ns = []
        ds = []
        dl = data_lists.get(item.item_name)
        if dl:
            field_names = data_lists[item.item_name]['fields']
            dataset = data_lists[item.item_name]['records']
            for d in dataset:
                ds.append([])
            for i, f in enumerate(field_names):
                if item.field_by_name(f):
                    ns.append(f)
                    for j, d in enumerate(dataset):
                        ds[j].append(dataset[j][i])
        else:
            for f in item.fields:
                ns.append(f.field_name)
        return ns, ds

    def check_data_integrity(self):
        """Fail the import when matching items changed type or table."""
        if self.success:
            self.show_progress(self.task.language('import_checking_integrity'))
            errors = []
            new = self.new_items['sys_items']
            old = self.old_items['sys_items']
            compare = self.compare_items(old, new)
            for it in old:
                o, n = compare[old.id.value]
                if o and n:
                    # Item exists on both sides -- the definitions must agree.
                    new.locate('id', old.id.value)
                    if old.type_id.value != new.type_id.value:
                        errors.append('Items with ID %s (%s, %s) have different type values' % \
                            (old.id.value, old.f_item_name.value, new.f_item_name.value))
                    elif old.f_table_name.value and old.f_table_name.value.upper() != new.f_table_name.value.upper():
                        errors.append('Items with ID %s (%s, %s) have different database tables (%s, %s)' % \
                            (old.id.value, old.f_item_name.value, new.f_item_name.value, old.f_table_name.value, new.f_table_name.value))
            if len(errors):
                self.error = "\n".join(errors)
                self.success = False
                self.show_error(self.error)

    def compare_items(self, old, new, owner_id=None):
        """Map record id -> [exists_in_old, exists_in_new].

        When *owner_id* is given, only new records owned by that id are
        considered.
        """
        result = {}
        for it in old:
            result[it.id.value] = [True, False]
        for it in new:
            if not owner_id or owner_id == it.owner_rec_id.value:
                info = result.get(it.id.value)
                if info:
                    info[1] = True
                else:
                    result[it.id.value] = [False, True]
        return result

    def analize_data(self):
        """Build the DDL (db_sql) and admin DML (adm_sql) statement lists."""
        if self.success:
            self.show_progress(self.task.language('import_analyzing'))
            try:
                task = self.task
                db_sql = []
                adm_sql = []
                # NOTE(review): `deltas` is assigned but never used.
                deltas = {}
                # Dropped indexes must go first, before their tables change.
                delta = self.get_delta('sys_indices', options=['delete'])
                for d in delta:
                    table_name = self.get_table_name(d.owner_rec_id.value)
                    if table_name:
                        db_sql.append(indices_delete_sql(task.sys_indices, d))
                adm_sql.append(delta.apply_sql())
                # Table create/alter/drop statements.
                delta = self.get_delta('sys_items', 'sys_fields')
                self.check_generator(task.sys_items, delta)
                for d in delta:
                    if d.rec_inserted():
                        db_sql.append(items_insert_sql(task.sys_items, d,
                            new_fields=self.get_new_fields(d.id.value)))
                    elif d.rec_modified():
                        db_sql.append(items_update_sql(task.sys_items, d))
                    elif d.rec_deleted():
                        db_sql.append(items_delete_sql(task.sys_items, d))
                self.refresh_old_item('sys_items')
                delta = self.get_delta('sys_items')
                self.check_generator(task.sys_items, delta)
                adm_sql.append(delta.apply_sql())
                self.refresh_old_item('sys_fields')
                delta = self.get_delta('sys_fields')
                adm_sql.append(delta.apply_sql())
                self.refresh_old_item('sys_indices')
                # New/changed indexes are created after the table changes.
                delta = self.get_delta('sys_indices', options=['update', 'insert'])
                for d in delta:
                    table_name = self.get_table_name(d.owner_rec_id.value)
                    if table_name:
                        if d.rec_inserted():
                            db_sql.append(indices_insert_sql(
                                task.sys_indices,
                                d, table_name,
                                self.get_new_fields(d.owner_rec_id.value),
                                foreign_key_dict=self.get_foreign_key_dict(d)
                                )
                            )
                        elif d.rec_deleted():
                            db_sql.append(indices_delete_sql(task.sys_indices, d))
                adm_sql.append(delta.apply_sql())
                # Remaining admin-only tables need DML only (no DDL).
                for item_name in ['sys_filters', 'sys_report_params', 'sys_roles', 'sys_params',
                    'sys_privileges', 'sys_lookup_lists']:
                    delta = self.get_delta(item_name)
                    adm_sql.append(delta.apply_sql())
                self.db_sql = self.sqls_to_list(db_sql)
                self.adm_sql = self.sqls_to_list(adm_sql)
            except Exception as e:
                self.task.log.exception(e)
                self.success = False
                self.show_error(e)

    def get_table_name(self, item_id):
        """Return the db table for *item_id*, or None for virtual tables."""
        items = self.new_items['sys_items']
        if items.locate('id', item_id):
            if not items.f_virtual_table.value:
                return items.f_table_name.value

    def get_foreign_key_dict(self, ind):
        """Describe the foreign key for index *ind* (or None).

        Returns {'key': local column, 'ref': referenced table,
        'primary_key': referenced column} built from the imported datasets.
        """
        dic = None
        if ind.f_foreign_index.value:
            dic = {}
            fields = self.new_items['sys_fields']
            fields.locate('id', ind.f_foreign_field.value)
            dic['key'] = fields.f_db_field_name.value
            ref_id = fields.f_object.value
            items = self.new_items['sys_items']
            items.locate('id', ref_id)
            dic['ref'] = items.f_table_name.value
            primary_key = items.f_primary_key.value
            fields.locate('id', primary_key)
            dic['primary_key'] = fields.f_db_field_name.value
        return dic

    def get_new_fields(self, item_id):
        """Column definitions (own + inherited) for the item's CREATE TABLE."""
        result = []
        items = self.new_items['sys_items']
        if items.locate('id', item_id):
            parent_id = items.parent.value
            new_fields = self.new_items['sys_fields']
            for field in new_fields:
                # Fields owned by the item itself or by its parent prototype.
                if field.owner_rec_id.value in [item_id, parent_id]:
                    if not field.f_master_field.value:
                        dic = {}
                        dic['id'] = field.id.value
                        dic['field_name'] = field.f_db_field_name.value
                        dic['data_type'] = field.f_data_type.value
                        dic['size'] = field.f_size.value
                        dic['default_value'] = ''#field.f_default_value.value
                        dic['primary_key'] = field.id.value == items.f_primary_key.value
                        result.append(dic)
        return result

    def can_copy_field(self, field):
        """True unless *field* is in the hidden-field lists for its item."""
        if field.owner.item_name == 'sys_params':
            if field.field_name in self.params_hidden_fields:
                return False
        if field.owner.item_name == 'sys_items':
            if field.field_name in self.items_hidden_fields:
                return False
        return True

    def copy_record(self, old, new):
        """Copy field values FROM the imported record INTO the current one."""
        for old_field in old.fields:
            if self.can_copy_field(old_field):
                new_field = new.field_by_name(old_field.field_name)
                if new_field:
                    old_field.value = new_field.raw_value

    def update_item(self, item_name, detail_name=None,
            options=['update', 'insert', 'delete'], owner=None):
        """Merge the imported dataset into the current one for *item_name*.

        Deletes, updates and inserts records (per *options*), recursing
        into *detail_name* for master-detail items.  Returns the updated
        current item so the caller can take its delta.
        """
        new = self.new_items[item_name]
        if owner:
            old = owner.detail_by_name(item_name)
            old.open(expanded=False)
        else:
            old = self.old_items[item_name]
        owner_id = None
        if owner:
            owner_id = owner.id.value
        compare = self.compare_items(old, new, owner_id)
        if 'delete' in options:
            old.first()
            while not old.eof():
                if not owner_id or owner_id == old.owner_rec_id.value:
                    o, n = compare[old.id.value]
                    if o and not n:
                        # delete() moves the cursor, so no explicit next().
                        old.delete()
                    else:
                        old.next()
                else:
                    old.next()
        if 'update' in options:
            new_ids = {}
            for it in new:
                new_ids[new.id.value] = new.rec_no
            for it in old:
                if not owner_id or owner_id == it.owner_rec_id.value:
                    o, n = compare[old.id.value]
                    if o and n:
                        rec = new_ids.get(old.id.value)
                        if rec is not None:
                            new.rec_no = rec
                            old.edit()
                            self.copy_record(old, new)
                            if detail_name:
                                self.update_item(detail_name, owner=old)
                            old.post()
        if 'insert' in options:
            for it in new:
                if not owner_id or owner_id == it.owner_rec_id.value:
                    o, n = compare[new.id.value]
                    if not o and n:
                        old.append()
                        self.copy_record(old, new)
                        if detail_name:
                            self.update_item(detail_name, owner=old)
                        old.post()
        return old

    def get_delta(self, item_name, detail_name=None, options=['update', 'insert', 'delete']):
        """Merge imported data into *item_name* and return the change delta."""
        item = self.update_item(item_name, detail_name, options)
        return item.delta()

    def check_generator(self, item, delta):
        """Fill in a default sequence name for new tables that need one."""
        for d in delta:
            if d.rec_inserted() and item.task.task_db_module.NEED_GENERATOR and \
                d.f_primary_key.value and not d.f_gen_name.value:
                d.edit()
                d.f_gen_name.value = '%s_SEQ' % d.f_table_name.value
                d.post()

    def refresh_old_item(self, item_name):
        """Re-read *item_name* from the admin db after applying changes."""
        item = self.task.item_by_name(item_name).copy(handlers=False)
        item.open(expanded=False)
        self.old_items[item_name] = item

    def wait_ready(self):
        """Wait for in-flight client requests to drain before applying."""
        if self.success:
            if self.from_client:
                self.show_progress(self.task.language('import_waiting_close'))
                request_count = int(self.from_client)
                if consts.IMPORT_DELAY:
                    time.sleep(consts.IMPORT_DELAY)
                else:
                    # NOTE(review): `i` is reset to 0 on every iteration, so
                    # the `i > 3000` timeout can never trigger and this loop
                    # can spin forever while requests stay busy -- the reset
                    # probably belongs before the `while`.
                    while True:
                        i = 0
                        if self.task.app._busy > request_count:
                            time.sleep(0.1)
                            i += 1
                            if i > 3000:
                                break
                        else:
                            break

    def import_databases(self):
        """Apply the DDL to the project db and the DML to admin.sqlite.

        When the backend supports transactional DDL (DDL_ROLLBACK), the
        admin db file is backed up first so both databases can be rolled
        back together on failure.
        """
        if self.success:
            self.show_progress(self.task.language('import_changing_db'))
            connection = self.execute_ddl()
            try:
                if self.success:
                    admin_name = os.path.join(self.task.work_dir, 'admin.sqlite')
                    tmp_admin_name = os.path.join(self.task.work_dir, '_admin.sqlite')
                    if self.db_module.DDL_ROLLBACK:
                        # File-level backup of admin.sqlite acts as its
                        # "transaction".
                        shutil.copy2(admin_name, tmp_admin_name)
                    self.show_progress(self.task.language('import_changing_admin'))
                    result, error = self.task.execute(self.adm_sql)
                    self.error = error
                    if self.error:
                        self.success = False
                    if self.db_module.DDL_ROLLBACK:
                        if self.success:
                            connection.commit()
                            os.remove(tmp_admin_name)
                        else:
                            # Restore admin.sqlite and roll the DDL back.
                            os.rename(tmp_admin_name, admin_name)
                            connection.rollback()
            finally:
                connection.close();
                consts.read_settings()
                consts.read_language()

    def execute_ddl(self):
        """Run the collected DDL; return the (possibly open) connection.

        Errors are recorded per-statement; with DDL_ROLLBACK the run stops
        at the first error and success is cleared, otherwise execution
        continues best-effort.
        """
        task = self.task
        info = []
        error = None
        connection = None
        try:
            connection = self.db_module.connect(
                task.task_db_database,
                task.task_db_user,
                task.task_db_password,
                task.task_db_host,
                task.task_db_port,
                task.task_db_encoding,
                task.task_db_server)
            if self.db_sql:
                cursor = connection.cursor()
                for sql in self.db_sql:
                    try:
                        cursor.execute(sql)
                    except Exception as x:
                        self.task.log.exception('Error: %s query: %s' % (x, sql))
                        error = error_message(x)
                    info.append({'sql': sql, 'error': error})
                    if error and self.db_module.DDL_ROLLBACK:
                        break
                if self.db_module.DDL_ROLLBACK:
                    if error:
                        self.success = False
                else:
                    connection.commit()
        except Exception as x:
            error = str(x)
            info.append({'error': error})
        self.show_info(info)
        return connection

    def sqls_to_list(self, sqls, result=None):
        """Flatten the nested statement lists, dropping empty entries."""
        if result is None:
            result = []
        for sql in sqls:
            if sql:
                if type(sql) == list:
                    self.sqls_to_list(sql, result)
                else:
                    result.append(sql)
        return result

    def update_logs(self):
        """Write the server log file and finalize the client HTML log."""
        if self.success:
            result = self.task.language('import_success')
            if self.error:
                result = self.task.language('import_errors')
        else:
            result = self.task.language('import_failed')
        self.task.log.info(result)
        self.server_log = '%s\n\n%s' % (result.upper(), self.server_log)
        log_dir = os.path.join(self.task.work_dir, 'logs')
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        log_file_name = os.path.join(log_dir, 'import_%s.log' % datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
        file_write(log_file_name, self.server_log)
        if self.success:
            message = '<h3 class="text-center">%s</h3>' % result
        else:
            message = '<h3 class="text-center text-error">%s</h3>' % result
        self.client_log = '%s<h4 class="text-info">%s</h4><div>%s</div>' % \
            (message, self.task.language('import_log'), self.client_log)

    def copy_files(self):
        """Copy the remaining archive contents over the working directory."""
        if self.success:
            self.show_progress(self.task.language('import_copying'))
            dir_util.copy_tree(self.tmpdir, self.task.work_dir)

    def tidy_up(self):
        """Remove the temp dir and (on success/client upload) the archive."""
        self.show_progress(self.task.language('import_deleteing_files'))
        try:
            if self.tmpdir and os.path.exists(self.tmpdir):
                shutil.rmtree(self.tmpdir)
            if self.success or self.from_client:
                os.remove(self.file_name)
        except Exception as e:
            self.task.log.exception(e)
            self.show_error(e)

    def show_progress(self, string):
        """Log a progress line to the task log and both log buffers."""
        self.task.log.info(string)
        self.client_log += '<h5>' + string + '</h5>'
        self.server_log += '\n%s\n' % string

    def show_info(self, errors):
        """Append executed statements (and their errors) to the logs."""
        for info in errors:
            sql = info.get('sql')
            error = info.get('error')
            if sql:
                self.task.log.info(sql)
                if error:
                    self.client_log += '<div class="text-error" style="margin-bottom: 10px; margin-left: 20px;">' + sql + '</div>'
                else:
                    self.client_log += '<div style="margin-bottom: 10px; margin-left: 20px;">' + sql + '</div>'
                self.server_log += '\n%s' % sql
            if error:
                self.show_error(error)

    def show_error(self, error):
        """Append an error message to the client and server logs."""
        mess = self.format_error(error_message(error))
        self.client_log += '<div class="text-error" style="margin-left: 40px;">' + mess + '</div>'
        self.server_log += '\n%s' % error

    def format_error(self, error):
        """HTML-format a multi-line error, preserving its indentation."""
        try:
            arr = str(error).split('\n')
            lines = []
            for line in arr:
                line = line.replace('\t', '    ')
                spaces = 0
                for ch in line:
                    if ch == ' ':
                        spaces += 1
                    else:
                        break
                if spaces:
                    line = '<span style="white-space: pre; margin-left: %spx">%s</span>' % (10 * (spaces - 1), line)
                else:
                    line = '<span style="white-space: pre;">%s</span>' % line
                lines.append(line)
            result = '<br>'.join(lines)
            return '<div class="text-error">%s</div>' % result
        except:
            return error

    def project_empty(self):
        """True when no item in the project is bound to a database table."""
        items = self.task.sys_items.copy(handlers=False)
        items.open(fields=['id', 'f_table_name'])
        for i in items:
            if i.f_table_name.value:
                return False
        return True
|
ice/debug.py
|
reavessm/Ice
| 578 |
144064
|
# encoding: utf-8
import os
import pastebin
import filesystem
import paths
import settings
def debug_log_contents():
    """Return the contents of every Ice config file plus the log file,
    each wrapped in an identifying banner and joined with newlines."""
    fs = filesystem.RealFilesystem()
    # Fix: removed the unreachable `return log_file_contents()` that
    # followed this return -- it was dead code calling an undefined name.
    return "\n".join([
        debug_string_for_file(settings.settings_file_path('config.txt', fs)),
        debug_string_for_file(settings.settings_file_path('consoles.txt', fs)),
        debug_string_for_file(settings.settings_file_path('emulators.txt', fs)),
        debug_string_for_file(paths.log_file_location()),
    ])
def debug_string_for_file(path):
    """Return *path*'s contents under a '======= name (path) =======' banner."""
    banner = "======= {} ({}) =======".format(os.path.basename(path), path)
    return "{}\n{}".format(banner, file_contents(path))
def file_contents(path):
    """Read and return the entire text of the file at *path*."""
    with open(path, 'r') as source:
        data = source.read()
    return data
def make_paste(contents):
    """Upload *contents* to Pastebin as an unlisted paste; return its URL."""
    # NOTE(review): the Pastebin API key is hard-coded in the source, so
    # anyone reading the code can reuse it -- consider moving it to config.
    client = pastebin.PastebinAPI()
    return client.paste(
        '50de643bdfa229b7488a663091fedf59',
        contents,
        paste_name = 'Ice Debug Logs',
        paste_private = 'unlisted',
    )
def paste_debug_logs():
    """Upload the debug logs to Pastebin and print the resulting URL."""
    url = make_paste(debug_log_contents())
    # Fix: the original used Python-2-only print statements, which are
    # syntax errors under Python 3.  The parenthesized single-argument
    # form below behaves identically on Python 2 and is valid Python 3.
    print("You can find your logs at:\n")
    print("\t%s\n" % url)
    print("Please include this link in any bug reports.")
|
src/tests/v14/test_issue_126.py
|
TetianaHrunyk/FireO
| 231 |
144065
|
from fireo.fields import TextField, NumberField
from fireo.models import Model
class City(Model):
    """Minimal FireO model used to reproduce issue #126."""
    name = TextField()  # city name
    population = NumberField()  # number of inhabitants
def test_issue_126():
    """create() with no_return=True must return None instead of the model."""
    city = City.collection.create(name='NYC', population=500000, no_return=True)
    # Fix: use the identity check `is None` (PEP 8) instead of `== None`.
    assert city is None
|
tests/conftest.py
|
lexdene/aiomcache
| 121 |
144077
|
<reponame>lexdene/aiomcache<gh_stars>100-1000
import asyncio
import collections
import gc
import logging
import pytest
import re
import socket
import sys
import time
import uuid
import warnings
import docker as docker_mod
import memcache
import aiomcache
# Value of the --memcached command line option; filled in during test setup.
mcache_server_option = None


def pytest_addoption(parser):
    """Register the --memcached option for pointing at an external server."""
    parser.addoption('--memcached', help='Memcached server')
class _AssertWarnsContext:
    """A context manager used to implement TestCase.assertWarns* methods."""

    def __init__(self, expected, expected_regex=None):
        self.expected = expected  # warning class that must be triggered
        if expected_regex is not None:
            expected_regex = re.compile(expected_regex)
        self.expected_regex = expected_regex  # optional message pattern
        self.obj_name = None

    def __enter__(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in sys.modules.values():
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}
        self.warnings_manager = warnings.catch_warnings(record=True)
        self.warnings = self.warnings_manager.__enter__()
        # "always" prevents repeated warnings from being deduplicated away.
        warnings.simplefilter("always", self.expected)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.warnings_manager.__exit__(exc_type, exc_value, tb)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return
        try:
            exc_name = self.expected.__name__
        except AttributeError:
            exc_name = str(self.expected)
        # Scan captured warnings for one of the expected class whose message
        # also matches the regex (when one was given).
        first_matching = None
        for m in self.warnings:
            w = m.message
            if not isinstance(w, self.expected):
                continue
            if first_matching is None:
                first_matching = w
            if (self.expected_regex is not None and
                not self.expected_regex.search(str(w))):
                continue
            # store warning for later retrieval
            self.warning = w
            self.filename = m.filename
            self.lineno = m.lineno
            return
        # Now we simply try to choose a helpful failure message
        if first_matching is not None:
            __tracebackhide__ = True
            assert 0, '"{}" does not match "{}"'.format(
                self.expected_regex.pattern, str(first_matching))
        if self.obj_name:
            __tracebackhide__ = True
            assert 0, "{} not triggered by {}".format(exc_name,
                                                      self.obj_name)
        else:
            __tracebackhide__ = True
            assert 0, "{} not triggered".format(exc_name)
_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
["records", "output"])
class _CapturingHandler(logging.Handler):
    """Logging handler that records every LogRecord and its formatted text."""

    def __init__(self):
        logging.Handler.__init__(self)
        self.watcher = _LoggingWatcher([], [])

    def flush(self):
        # Nothing is buffered; records are stored immediately in emit().
        pass

    def emit(self, record):
        watcher = self.watcher
        watcher.records.append(record)
        watcher.output.append(self.format(record))
class _AssertLogsContext:
    """A context manager used to implement TestCase.assertLogs()."""

    LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"

    def __init__(self, logger_name=None, level=None):
        self.logger_name = logger_name
        if level:
            # NOTE(review): logging._nameToLevel is a private CPython detail;
            # confirm it exists on every supported Python version.
            self.level = logging._nameToLevel.get(level, level)
        else:
            self.level = logging.INFO
        self.msg = None

    def __enter__(self):
        # Accept either a logger instance or a logger name.
        if isinstance(self.logger_name, logging.Logger):
            logger = self.logger = self.logger_name
        else:
            logger = self.logger = logging.getLogger(self.logger_name)
        formatter = logging.Formatter(self.LOGGING_FORMAT)
        handler = _CapturingHandler()
        handler.setFormatter(formatter)
        self.watcher = handler.watcher
        # Save the logger's full state so it can be restored on exit.
        self.old_handlers = logger.handlers[:]
        self.old_level = logger.level
        self.old_propagate = logger.propagate
        logger.handlers = [handler]
        logger.setLevel(self.level)
        logger.propagate = False
        return handler.watcher

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the logger exactly as it was before __enter__.
        self.logger.handlers = self.old_handlers
        self.logger.propagate = self.old_propagate
        self.logger.setLevel(self.old_level)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return False
        if len(self.watcher.records) == 0:
            __tracebackhide__ = True
            assert 0, ("no logs of level {} or higher triggered on {}"
                       .format(logging.getLevelName(self.level),
                               self.logger.name))
@pytest.yield_fixture
def warning():
    """Expose the _AssertWarnsContext helper class to tests."""
    yield _AssertWarnsContext
@pytest.yield_fixture
def log():
    """Expose the _AssertLogsContext helper class to tests."""
    yield _AssertLogsContext
@pytest.fixture(scope='session')
def unused_port():
    """Return a callable that finds a free TCP port on 127.0.0.1."""
    def f():
        # Binding to port 0 lets the OS pick a free port; report its number.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(('127.0.0.1', 0))
            return s.getsockname()[1]
    return f
@pytest.yield_fixture
def loop(request):
    """Fresh asyncio event loop per test, closed and GC'd afterwards."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)
    yield loop
    # NOTE(review): `_closed` is a private loop attribute; the public
    # equivalent is loop.is_closed() -- confirm across supported versions.
    if not loop._closed:
        # Let pending callbacks run once before closing the loop.
        loop.call_soon(loop.stop)
        loop.run_forever()
        loop.close()
    gc.collect()
    asyncio.set_event_loop(None)
@pytest.mark.tryfirst
def pytest_pycollect_makeitem(collector, name, obj):
    """Collect coroutines marked 'run_loop' as ordinary test functions."""
    if collector.funcnamefilter(name):
        if not callable(obj):
            return
        # NOTE(review): constructing pytest.Function directly is deprecated
        # in modern pytest (Function.from_parent) -- verify pinned version.
        item = pytest.Function(name, parent=collector)
        if 'run_loop' in item.keywords:
            return list(collector._genfunctions(name, obj))
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
    """
    Run asyncio marked test functions in an event loop instead of a normal
    function call.
    """
    if 'run_loop' in pyfuncitem.keywords:
        funcargs = pyfuncitem.funcargs
        loop = funcargs['loop']
        # Pass only the fixtures the test function actually declared.
        testargs = {arg: funcargs[arg]
                    for arg in pyfuncitem._fixtureinfo.argnames}
        loop.run_until_complete(pyfuncitem.obj(**testargs))
        # Returning True tells pytest the call was handled by this hook.
        return True
def pytest_runtest_setup(item):
    """Inject the 'loop' fixture for run_loop tests; record --memcached."""
    global mcache_server_option
    if 'run_loop' in item.keywords and 'loop' not in item.fixturenames:
        # inject an event loop fixture for all async tests
        item.fixturenames.append('loop')
    mcache_server_option = item.config.getoption('--memcached')
def pytest_ignore_collect(path, config):
    """Skip Python-3.5-syntax test modules on older interpreters."""
    needs_py35 = 'test_py35' in str(path)
    if needs_py35 and sys.version_info < (3, 5, 0):
        return True
@pytest.fixture(scope='session')
def session_id():
    '''Unique session identifier, random string.'''
    # uuid4 is random, so concurrent test sessions get distinct container
    # names (see mcache_server_docker).
    return str(uuid.uuid4())
@pytest.fixture(scope='session')
def docker():
    """Session-wide docker client."""
    # NOTE(review): the rest of this module calls low-level methods such as
    # create_container/inspect_container, which belong to docker.APIClient,
    # while from_env() returns the high-level client in docker-py >= 2.0 --
    # confirm the pinned docker-py version.
    return docker_mod.from_env()
def mcache_server_actual(host, port='11211'):
    """Describe an externally supplied memcached server.

    Returns a dict shaped like the docker fixture's container dict:
    'host', 'port' (as int) and a 'mcache_params' mapping of the same.
    """
    params = {'host': host, 'port': int(port)}
    described = dict(params)
    described['mcache_params'] = params
    return described
def mcache_server_docker(unused_port, docker, session_id):
    """Start a disposable memcached container and yield its description.

    Yields a dict with 'host', 'port' and 'mcache_params'; the container
    is killed and removed when the generator is finalized.
    """
    docker.pull('memcached:alpine')
    container = docker.create_container(
        image='memcached:alpine',
        name='memcached-test-server-{}'.format(session_id),
        ports=[11211],
        detach=True,
    )
    try:
        docker.start(container=container['Id'])
        inspection = docker.inspect_container(container['Id'])
        host = inspection['NetworkSettings']['IPAddress']
        port = 11211
        mcache_params = dict(host=host, port=port)
        # Poll with exponential backoff until memcached answers (10 tries).
        delay = 0.001
        for i in range(10):
            try:
                conn = memcache.Client(
                    ['{host}:{port}'.format_map(mcache_params)])
                conn.get_stats()
                break
            except Exception:
                time.sleep(delay)
                delay *= 2
        else:
            pytest.fail("Cannot start memcached")
        container['host'] = host
        container['port'] = port
        container['mcache_params'] = mcache_params
        time.sleep(0.1)
        yield container
    finally:
        # Always clean the container up, even when startup failed.
        docker.kill(container=container['Id'])
        docker.remove_container(container['Id'])
@pytest.fixture(scope='session')
def mcache_server(unused_port, docker, session_id):
    """Yield a memcached server: docker-based unless --memcached was given."""
    if not mcache_server_option:
        yield from mcache_server_docker(unused_port, docker, session_id)
    else:
        # --memcached is "host" or "host:port".
        mcache_params = mcache_server_option.split(':')
        yield mcache_server_actual(*mcache_params)
@pytest.fixture
def mcache_params(mcache_server):
    """Fresh copy of the server's connection kwargs (host/port)."""
    return dict(**mcache_server['mcache_params'])
@pytest.yield_fixture
def mcache(mcache_params, loop):
    """aiomcache client bound to the test loop; closed after the test."""
    client = aiomcache.Client(loop=loop, **mcache_params)
    yield client
    client.close()
|
reviewboard/admin/forms/change_form.py
|
b1pb1p/reviewboard
| 921 |
144083
|
<gh_stars>100-1000
"""Form-related classes for the administration Change Form pages."""
from __future__ import unicode_literals
import itertools
from django import forms
from django.contrib.admin.helpers import Fieldline, Fieldset
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.models import User
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from reviewboard.admin.form_widgets import (RelatedGroupWidget,
RelatedRepositoryWidget,
RelatedUserWidget)
from reviewboard.reviews.models import Group
from reviewboard.scmtools.models import Repository
class ChangeFormFieldset(Fieldset):
    """A fieldset in an administration change form.
    This takes care of providing state to the change form to represent a
    fieldset and each row in that fieldset.
    The fieldset makes use of the ``.rb-c-form-fieldset`` CSS component.
    """
    def __init__(self, form, classes=(), **kwargs):
        """Initialize the fieldset.
        Args:
            form (django.contrib.admin.helpers.AdminForm):
                The administration form owning the fieldset.
            classes (tuple, optional):
                Additional CSS classes to add to the ``<fieldset>`` element.
            **kwargs (dict):
                Keyword arguments to pass to the parent class.
        """
        if classes:
            # Transform any Django-named CSS classes to what we expect for our
            # rb-c-admin-fieldset CSS component's modifiers.
            css_class_map = {
                'collapse': ('-can-collapse', '-is-collapsed'),
                'wide': ('-is-wide',),
            }
            classes = tuple(itertools.chain.from_iterable(
                css_class_map.get(css_class, (css_class,))
                for css_class in classes
            ))
        # Templates read this flag to decide the initial collapsed state.
        self.collapsed = '-is-collapsed' in classes
        super(ChangeFormFieldset, self).__init__(
            form,
            classes=('rb-c-form-fieldset',) + classes,
            **kwargs)
    def render(self, context):
        """Render the fieldset to HTML.
        This will default to rendering using the
        ``admin/includes/fieldset.html`` template. A
        :py:class:`~django.contrib.admin.ModelAdmin` subclass my define a
        ``fieldset_template_name`` attribute specifying an alternative template
        to use for its fieldsets.
        The template will inherit the provided context, and will contain
        this fieldset instance as ``fieldset``.
        Args:
            context (django.template.Context):
                The current template context.
        Returns:
            django.utils.safestring.SafeText:
                The resulting HTML for the fieldset.
        """
        template_name = (
            getattr(self.model_admin, 'fieldset_template_name', None) or
            'admin/includes/fieldset.html'
        )
        # push() keeps the extra 'fieldset' variable scoped to this render.
        with context.push():
            context['fieldset'] = self
            return render_to_string(template_name, context.flatten())
    def __iter__(self):
        """Iterate through the rows of the fieldset.
        Yields:
            ChangeFormRow:
                A row in the fieldset.
        """
        readonly_fields = self.readonly_fields
        model_admin = self.model_admin
        for field in self.fields:
            yield ChangeFormRow(form=self.form,
                                field=field,
                                readonly_fields=readonly_fields,
                                model_admin=model_admin)
class ChangeFormRow(Fieldline):
    """A row in a fieldset containing one or more fields.
    A row may contain multiple fields (though it usually contains only one).
    This makes use of the ``.rb-c-form-row`` CSS component.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the row.
        Args:
            *args (tuple):
                Positional arguments to pass to the parent class.
            **kwargs (dict):
                Keyword arguments to pass to the parent class.
        """
        super(ChangeFormRow, self).__init__(*args, **kwargs)
        self.is_multi_line = len(self.fields) > 1
        # One 'field-<name>' CSS class per field, on top of the component
        # class, so styles/JS can target individual fields.
        self.classes = ' '.join(['rb-c-form-row'] + [
            'field-%s' % field_name
            for field_name in self.fields
        ])
        self.row_id = 'row-%s' % self.fields[0]
    def __iter__(self):
        """Iterate through the list of fields in the row.
        Yields:
            ChangeFormField:
                A field in the row.
        """
        for admin_field in super(ChangeFormRow, self).__iter__():
            yield ChangeFormField(self, admin_field)
class ChangeFormField(object):
    """A wrapper for a field on the change form.
    This takes care of providing state to the change form to represent an
    individual field on a row, providing any field validation errors.
    It also takes care of creating ideal representations of some widgets
    (such as our special related object widgets for users, groups, and
    repositories, and filtered multi-select for other many-to-many relations).
    This makes use of the ``.rb-c-form-field`` CSS component.
    """
    def __init__(self, form_row, admin_field):
        """Initialize the field wrapper.
        Args:
            form_row (ChangeFormRow):
                The parent row containing the field.
            admin_field (django.contrib.admin.helpers.AdminField):
                The administration field wrapper containing state for this
                field.
        """
        bound_field = admin_field.field
        has_field_first = False
        show_errors = False
        is_checkbox = getattr(admin_field, 'is_checkbox', False)
        is_readonly = getattr(admin_field, 'is_readonly', False)
        classes = ['rb-c-form-field']
        if is_readonly:
            classes.append('-is-read-only')
            errors = []
        else:
            form_field = bound_field.field
            errors = admin_field.errors()
            if form_field.required:
                classes.append('-is-required')
            if errors:
                classes.append('-has-errors')
                show_errors = True
            if isinstance(form_field, forms.ModelMultipleChoiceField):
                widget = form_field.widget
                model = form_field.queryset.model
                if type(widget) is forms.ModelMultipleChoiceField.widget:
                    # This is a default widget for a model multi-choice field.
                    # Let's see if we use a better default.
                    if model is User:
                        form_field.widget = RelatedUserWidget()
                    elif model is Group:
                        form_field.widget = RelatedGroupWidget()
                    elif model is Repository:
                        form_field.widget = RelatedRepositoryWidget()
                    else:
                        # We can at least use the filtered selector.
                        form_field.widget = FilteredSelectMultiple(
                            form_field.label,
                            is_stacked=False)
                # Fix: the original condition was `type(widget) is not
                # widget`, which compares a class to an instance and is
                # therefore always true, clearing the help text for every
                # multi-choice field.  Comparing the field's current widget
                # against the saved original detects an actual replacement.
                if form_field.widget is not widget:
                    # We've replaced the widget, so get rid of the old bound
                    # help text while we're at it.
                    bound_field.help_text = None
        if form_row.is_multi_line:
            classes.append('field-%s' % bound_field.name)
        elif is_checkbox:
            classes.append('-has-input-first')
            has_field_first = True
        self.admin_field = admin_field
        self.classes = ' '.join(classes)
        self.errors = errors
        self.field = bound_field
        self.has_field_first = has_field_first
        self.is_checkbox = is_checkbox
        self.is_first = admin_field.is_first
        self.is_readonly = is_readonly
        self.show_errors = show_errors
    def label_tag(self):
        """Return the HTML for a label tag for this field.
        This will create a ``<label class="rb-c-form-field_label">`` element
        containing the label.
        Returns:
            django.utils.safestring.SafeText:
                The ``<label>`` tag for this field.
        """
        field = self.field
        attrs = {}
        classes = ['rb-c-form-field__label']
        if not self.is_first:
            classes.append('-is-inline')
        attrs['class'] = ' '.join(classes)
        if self.is_readonly:
            return format_html('<label{0}>{1}:</label>',
                               flatatt(attrs),
                               capfirst(force_text(field['label'])))
        else:
            if self.has_field_first:
                label_suffix = ''
            else:
                label_suffix = None
            return field.label_tag(
                contents=conditional_escape(force_text(field.label)),
                attrs=attrs,
                label_suffix=label_suffix)
    def render(self):
        """Render the field.
        This will return the rendered field as HTML, or just the field's value
        if the field is meant to be read-only.
        Returns:
            django.utils.safestring.SafeText:
                The rendered content for the field.
        """
        if self.is_readonly:
            return format_html(
                '<div class="rb-c-form-field__readonly-value">{0}</div>',
                linebreaksbr(self.admin_field.contents()))
        else:
            return mark_safe(six.text_type(self.field))
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_bin_picking_v2.py
|
yiwc/robotics-world
| 681 |
144113
|
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerBinPickingEnvV2(SawyerXYZEnv):
    """Bin-picking task: grasp a cube from the start bin, drop it in the goal bin.

    Motivation for V2:
        V1 was often unsolvable because the cube could be located outside of
        the starting bin. It could even be near the base of the Sawyer and out
        of reach of the gripper. V2 changes the `obj_low` and `obj_high` bounds
        to fix this.
    Changelog from V1 to V2:
        - (7/20/20) Changed object initialization space
        - (7/24/20) Added Byron's XML changes
        - (11/23/20) Updated reward function to new pick-place style
    """
    def __init__(self):
        # End-effector workspace bounds (xyz, meters).
        hand_low = (-0.5, 0.40, 0.07)
        hand_high = (0.5, 1, 0.5)
        # Cube spawn region, kept strictly inside the start bin (the V2 fix).
        obj_low = (-0.21, 0.65, 0.02)
        obj_high = (-0.03, 0.75, 0.02)
        # Small bounds around the center of the target bin
        goal_low = np.array([0.1199, 0.699, -0.001])
        goal_high = np.array([0.1201, 0.701, +0.001])
        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )
        self.init_config = {
            'obj_init_angle': 0.3,
            'obj_init_pos': np.array([-0.12, 0.7, 0.02]),
            'hand_init_pos': np.array((0, 0.6, 0.2)),
        }
        self.goal = np.array([0.12, 0.7, 0.02])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Target-to-object distance at episode start; filled lazily on the
        # first compute_reward() call and used as a tolerance margin.
        self._target_to_obj_init = None
        self.hand_and_obj_space = Box(
            np.hstack((self.hand_low, obj_low)),
            np.hstack((self.hand_high, obj_high)),
        )
        self.goal_and_obj_space = Box(
            np.hstack((goal_low[:2], obj_low[:2])),
            np.hstack((goal_high[:2], obj_high[:2])),
        )
        self.goal_space = Box(goal_low, goal_high)
        self._random_reset_space = Box(
            np.hstack((obj_low, goal_low)),
            np.hstack((obj_high, goal_high)),
        )
    @property
    def model_name(self):
        """Path to the MJCF model for this task."""
        return full_v2_path_for('sawyer_xyz/sawyer_bin_picking.xml')
    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Score one (obs, action) step: returns (reward, info dict)."""
        (
            reward,
            near_object,
            grasp_success,
            obj_to_target,
            grasp_reward,
            in_place_reward
        ) = self.compute_reward(action, obs)
        info = {
            # Success = object within 5cm of the goal-bin center.
            'success': float(obj_to_target <= 0.05),
            'near_object': float(near_object),
            'grasp_success': float(grasp_success),
            'grasp_reward': grasp_reward,
            'in_place_reward': in_place_reward,
            'obj_to_target': obj_to_target,
            'unscaled_reward': reward,
        }
        return reward, info
    @property
    def _target_site_config(self):
        # No visual target sites for this task.
        return []
    def _get_id_main_object(self):
        return self.unwrapped.model.geom_name2id('objGeom')
    def _get_pos_objects(self):
        return self.get_body_com('obj')
    def _get_quat_objects(self):
        return self.sim.data.get_body_xquat('obj')
    def reset_model(self):
        """Reset hand, (optionally random) cube position, and the goal."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        obj_height = self.get_body_com('obj')[2]
        if self.random_init:
            # Randomize only x/y; keep the cube resting at its model height.
            self.obj_init_pos = self._get_state_rand_vec()[:2]
            self.obj_init_pos = np.concatenate((self.obj_init_pos, [obj_height]))
        self._set_obj_xyz(self.obj_init_pos)
        self._target_pos = self.get_body_com('bin_goal')
        self._target_to_obj_init = None
        return self._get_obs()
    def compute_reward(self, action, obs):
        """Dense reward for grasp-and-place.

        Returns a 6-tuple:
            (reward, near_object, grasp_success, target_to_obj,
             object_grasped, in_place).
        """
        # Observation layout assumed from the slicing below: obs[:3] is the
        # end-effector xyz, obs[3] the gripper openness, obs[4:7] the object
        # xyz -- TODO confirm against SawyerXYZEnv._get_obs().
        hand = obs[:3]
        obj = obs[4:7]
        target_to_obj = np.linalg.norm(obj - self._target_pos)
        if self._target_to_obj_init is None:
            self._target_to_obj_init = target_to_obj
        # NOTE: self.TARGET_RADIUS is presumably defined on the base class;
        # not set in this file.
        in_place = reward_utils.tolerance(
            target_to_obj,
            bounds=(0, self.TARGET_RADIUS),
            margin=self._target_to_obj_init,
            sigmoid='long_tail',
        )
        threshold = 0.03
        radii = [
            np.linalg.norm(hand[:2] - self.obj_init_pos[:2]),
            np.linalg.norm(hand[:2] - self._target_pos[:2])
        ]
        # floor is a *pair* of 3D funnels centered on (1) the object's initial
        # position and (2) the desired final position
        floor = min([
            0.02 * np.log(radius - threshold) + 0.2
            if radius > threshold else 0.0
            for radius in radii
        ])
        # prevent the hand from running into the edge of the bins by keeping
        # it above the "floor"
        above_floor = 1.0 if hand[2] >= floor else reward_utils.tolerance(
            max(floor - hand[2], 0.0),
            bounds=(0.0, 0.01),
            margin=0.05,
            sigmoid='long_tail',
        )
        object_grasped = self._gripper_caging_reward(
            action,
            obj,
            obj_radius=0.015,
            pad_success_thresh=0.05,
            object_reach_radius=0.01,
            xz_thresh=0.01,
            desired_gripper_effort=0.7,
            high_density=True,
        )
        reward = reward_utils.hamacher_product(object_grasped, in_place)
        near_object = np.linalg.norm(obj - hand) < 0.04
        # Gripper nearly closed with nothing between the pads.
        pinched_without_obj = obs[3] < 0.43
        lifted = obj[2] - 0.02 > self.obj_init_pos[2]
        # Increase reward when properly grabbed obj
        grasp_success = near_object and lifted and not pinched_without_obj
        if grasp_success:
            reward += 1. + 5. * reward_utils.hamacher_product(
                above_floor, in_place
            )
        # Maximize reward on success
        if target_to_obj < self.TARGET_RADIUS:
            reward = 10.
        return (
            reward,
            near_object,
            grasp_success,
            target_to_obj,
            object_grasped,
            in_place
        )
|
python/general-python/check-server-cache/serverCache.py
|
NagarjunaManupati/ESRI
| 272 |
144166
|
<reponame>NagarjunaManupati/ESRI
## serverCache.py
##
## Module that will allow you to use the RESTFul endpoints on the
## server for further manipulation
##
## Written By: <NAME>. & <NAME>.
##
## Created On: August 18, 2014
""" This script is designed to generate a token login and then reach out
to a server service passing that token and retrieve
server items.
This script creates a server object and then sends the request for the
cache to the server and gets a JSON response.
This should work with vanilla installs of python version 2.7.x.
ArcPy is not required.
Sample syntax is:
######
sv = serverPython('ServerUsername', 'ServerPassword', 'ServerRestURL', 'ServerTokenURL')
for i in sv.getStatus("ServiceName", "ServiceType"):
print "Scale level of cache: " + str(i['levelID']) + " Percent Complete: " + str(i['percent'])
######
The two service types are MapServer and ImageServer
"""
import urllib
import urllib2
import json
import httplib
import time
import getpass
import smtplib
class serverPython:
    """Minimal client for an ArcGIS Server REST endpoint.

    On construction it requests an authentication token, which is then
    attached to subsequent service calls (e.g. cache-status reports).
    """
    def __init__(self, username, password, refererURL, tokenURL):
        """This instantiates the serverPython object and grabs the variables from the user"""
        self.__username = username
        self.__password = password
        self.__client = "referer"
        self.__referer = refererURL
        self.__expiration = "6000"
        self.__encrypted = "false"
        self.__format = "json"
        self.__tokenURL = tokenURL
        # Fetch the token eagerly; all later requests reuse it.
        self.__token = self.getToken()
        self.__reportingTools = self.__referer + "/services/System/ReportingTools/GPServer/ReportCacheStatus/execute"
    def __sendRequest(self, urla, data, referer):
        """POST ``data`` to ``urla`` with the referer header and return the
        parsed JSON response."""
        try:
            url = urllib2.Request(urla)
            url.add_header('referer', referer)
            jres = urllib2.urlopen(url, data).read()
            return json.loads(jres)
        except httplib.IncompleteRead as e:
            # The server truncated the response; parse what did arrive.
            return json.loads(e.partial)
    def getToken(self):
        """Request an authentication token for the configured user."""
        # FIX: the password entry was corrupted ('<PASSWORD>.__password');
        # it must reference the stored credential.
        data = {'password': self.__password,
                'f': self.__format,
                'username': self.__username,
                'client': self.__client,
                'referer': self.__referer,
                'expiration': self.__expiration,
                'encrypted': self.__encrypted}
        return serverPython.__sendRequest(self, self.__tokenURL, urllib.urlencode(data), self.__referer)['token']
    def getStatus(self, serviceName, serviceType):
        """This gets the cache status of the map or image server"""
        serviceToSend = serviceName + ":" + serviceType
        data = {'token': self.__token,
                'service_url': serviceToSend,
                'f': self.__format}
        return serverPython.__sendRequest(self, self.__reportingTools, urllib.urlencode(data), self.__referer)['results'][0]['value']['lodInfos']
    def emailThePeople(self, emailTo, messageToSend, subject):
        """This will send an email using Gmail SMTP. Gmail address is required"""
        fromAddress = '<EMAIL>'
        # FIX: .format() was applied only to the final "{3}" element, so the
        # From/To/Subject placeholders were never substituted. Format the
        # joined template instead.
        message = "\r\n".join([
            "From: {0}",
            "To: {1}",
            "Subject: {2}",
            "",
            "{3}",
        ]).format(fromAddress, emailTo, subject, messageToSend)
        # NOTE(review): credentials are hard-coded; move them to config or
        # the environment.
        username = '<EMAIL>'
        password = '<PASSWORD>'
        server = smtplib.SMTP('smtp.gmail.com:587')
        server.starttls()
        server.login(username, password)
        server.sendmail(fromAddress, emailTo, message)
        server.quit()
# Ad-hoc manual test: report per-scale-level cache completion for the
# "Ashley" map service and email the summary.
# NOTE(review): host and credentials are hard-coded; move to configuration.
if __name__ == "__main__":
    sv = serverPython('alex7370', 'password', 'http://alexn.esri.com/arcgis/rest', 'http://alexn.esri.com/arcgis/tokens/')
    message = ""
    for i in sv.getStatus("Ashley", "MAPSERVER"):
        message = message + "Scale level of cache: " + str(i['levelID']) + " Percent Complete: " + str(i['percent']) + "\n"
    sv.emailThePeople('<EMAIL>', message, 'Subject')
|
lib/bindings/vs/__init__.py
|
tlalexander/stitchEm
| 182 |
144194
|
<reponame>tlalexander/stitchEm
from vs import *
from camera import *
|
crawler/cdi/cde.py
|
gaybro8777/CiteSeerX
| 108 |
144211
|
#!/usr/local/bin/python2.6
# Crawl Document Exporter
# Python code to export the crawling documents from the crawl repository out
#
# locate the input file and read the entire log file
# input: a MySQL database query
# output: a directory which contains the queried documents in hierarchical order
# (or just in a batch)
# e.g., 002/123/234/002.123.234.pdf
#
import os # define environment variable
import sys
import resource # define resource variable "r"
import output # defines writer
import logging
import logging.handlers
import threading
import time
import datetime
import hashlib
import runconfig # log parser configuration
#from django.conf import settings
import settings
os.environ['DJANGO_SETTINGS_MODULE'] = runconfig.django_settings_module
from subprocess import call
import filter_doc
import commands
import string
import glob
import urllib
import logparsers # log parser module
import counter
import textextract
import crawldb
import printinfo
import shutil
from exception import BadResourceError
# check configurations, including the following items
# (*) permission to write into the output folder
# This is checked by creating and delete a folder called "9999"
# inside the repository folder.
#
def checkConfig():
    """Sanity-check runconfig before exporting.

    Verifies that (1) the crawl repository directory exists and (2) the
    output directory is writable, probed by creating and removing a scratch
    folder named "9999". Returns True only when every check passes.
    """
    infoprtr = printinfo.printInfo()
    # crawl repository exists
    if not os.path.exists(runconfig.crawlrepo):
        infoprtr.printStatus('crawlrepo exists','no')
        return False
    else:
        infoprtr.printStatus('crawlrepo exists','yes')
    # permission to write into the output folder
    testdir = os.path.join(runconfig.cde["outputdir"],'9999')
    if os.path.exists(testdir):
        shutil.rmtree(testdir)
    try:
        os.makedirs(testdir)
        shutil.rmtree(testdir)
    except OSError,e:  # Python 2 except syntax -- this file predates py3
        print e
        infoprtr.printStatus('Write permission to outputdir','no')
        return False
    # if it passes all configuration checks
    return True
def startup():
    """Run the whole export pipeline.

    Checks configuration, connects to the crawl DB, queries the document
    ids selected by ``runconfig.cde["dbquery"]``, copies each document out
    of the repository, then prints/saves counter summaries and the elapsed
    time. Exits the process on configuration or query failure.
    """
    # record start time
    tic = time.time()
    # create on-screen information print object
    infoprinter = printinfo.printInfo()
    # check configurations
    if not checkConfig():
        infoprinter.printStatus('Configuration check', 'fail')
        raise SystemExit("Change your configurations in runconfig.py")
    else:
        infoprinter.printStatus('Configuration check', 'ok')
    # create exporter
    exporter = output.CiteSeerExporter([runconfig.cde["outputdir"], runconfig.crawlrepo])
    # create crawldb
    cdb = crawldb.CrawlDB()
    # create general log configers and config logs
    logconfiger = Log_Configer()
    logconfiger.config_loggers()
    # process DB query; bail out if it returns no document ids
    dbquery = runconfig.cde["dbquery"]
    ids = cdb.queryDocID(dbquery)
    infoprinter.printPara('#docid', str(len(ids)))
    if not ids:
        infoprinter.printStatus('DB query', 'fail')
        # BUG FIX: was os.exit(), which does not exist and would raise
        # AttributeError instead of terminating cleanly.
        sys.exit()
    # number counter
    counters = counter.Counter()
    counters.newCounter('all')
    counters.setCounter('all', len(ids))
    counters.newCounter('copied')
    # export each queried document
    if runconfig.cde["toggle_export"]:
        i = 0
        for id in ids:
            i = i + 1
            # Parenthesized print: identical in Python 2, valid in Python 3.
            print("%9d/%-9d : %9d" % (i, counters.all, id))
            if exporter.doc_export(id):
                counters.addCounter('copied')
            else:
                infoprinter.printStatus(str(id), 'fail')
    counters.printCounter()
    counters.printCountertoFile(runconfig.cde["summaryfile"])
    # record end time to calculate processing time
    # because strftime() will truncate the time string when converting to the
    # user-defined time format, we add "1" second to compensate this loss.
    toc = time.time()
    processingtime = time.strftime('%H:%M:%S', time.gmtime(toc - tic + 1))
    infoprinter.printPara('Processing time: ', processingtime)
def id_to_fname(id, ext='pdf'):
    """Map a numeric document id to its repository file name.

    The id is split into three zero-padded 3-digit groups (millions,
    thousands, units), e.g. 2123234 -> "002.123.234.pdf".

    Args:
        id: non-negative integer document id.
        ext: file extension without the leading dot. Defaults to "pdf"
            (previously emulated with a dead try/except NameError that could
            never fire on a required parameter).

    Returns:
        The file name string "AAA.BBB.CCC.ext".
    """
    # Use // so the division stays integral under both Python 2 and 3
    # (the original '/' floors only under Python 2).
    p1 = id // 1000000
    p2 = (id % 1000000) // 1000
    p3 = id % 1000
    s1 = str(p1).zfill(3)
    s2 = str(p2).zfill(3)
    s3 = str(p3).zfill(3)
    # os.path.join() on a single component was a no-op; return directly.
    return "%s.%s.%s.%s" % (s1, s2, s3, ext)
def create_instance(config_str, params):
    """Instantiate a class named by a config string.

    ``config_str`` has the form "pkg.module.ClassName[::extra::...]"; only
    the first '::'-segment is used. The named class is constructed with
    ``params`` as its single positional argument. Returns None (after
    logging) when the spec is malformed.
    """
    try:
        spec = config_str.strip().split('::')[0]
        # Split "pkg.module.Class" into module path and class name.
        module_name, _, class_name = spec.rpartition('.')
        __import__(module_name)
        module = sys.modules[module_name]
        return module.__dict__[class_name](params)
    except ValueError:
        logging.critical('Invalid config: %s' % config_str)
        return None
class Mime_Type_Filter(object):
    """Decides whether a fetched resource's MIME type is acceptable.

    After ``check(r)``, ``self.doctype`` holds the resource's content type
    and ``self.ext`` the extension parsed from its URL path.
    """
    def __init__(self,allow_doc_type):
        # default mimetype
        self.doctype = 'unknown' #final document type
        # if no document types specified (allow_doc_type), use pdf/postscript by default
        # NOTE(review): allow_doc_type is a required parameter, so this
        # try/except NameError can never fire; a default of None checked
        # explicitly would express the intent.
        try: allow_doc_type
        except NameError:
            allow_doc_type = ['application/pdf','application/postscript']
        self.allowtype = allow_doc_type
        # extension of original link file
        self.ext = ''
        # some website may use "bit stream" so that the content type is
        # "octet-streamm". However, we cannot distinguish for sure if this is pdf
        # These documents need to be crawled separated with special settings
        # mime type information is printed on the screen by default
        # if verbose is set to False, mime type information is not printed
        #
    def check(self,r):
        """Return True when resource ``r``'s content type matches an allowed
        type; also records the type and the URL-path file extension.

        ``r`` is assumed to expose ``content_type`` and ``path`` attributes
        (see the resource module) -- TODO confirm.
        """
        checktyperesult = False
        self.doctype = r.content_type
        for elem in self.allowtype:
            if elem in r.content_type:
                # mime type accepted
                checktyperesult = True
                # print mime type if set
                break
        # extract extension of the retrieved file
        # The original URL is first split by "/". The file name is the last element
        # If the file name contains two extensions, use both of them
        # e.g., example.pdf.Z -> self.ext = 'pdf.Z'
        # But this extension is not used, in the main program.
        # It maybe used for other purposes in the future.
        paths = r.path.split("/")
        filename = paths[-1] # file name is the last element
        if filename != '':
            filenames = filename.split(".")
            filenameslen = len(filenames)
            fileexts = ''
            if filenameslen >= 2:
                # but how to deal with part1.part2.ext1.ext2?
                fileexts = filenames[1:filenameslen] #filename.pdf.Z->['pdf','Z']
                self.ext = ".".join(fileexts)
        return checktyperesult
class Log_Configer(object):
    """Configures the process-wide loggers used by the exporter."""
    def __init__(self):
        # All log files live under ./log relative to the working directory.
        self.log_dir = 'log/'
        # if log directory does not exist, create one
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
    def config_loggers(self):
        """Attach handlers: the root logger writes WARNING+ to log/root.log;
        each named logger below gets its own daily-rotating INFO file."""
        rotating_interval = 'D'  # rotate the named logs once per day
        # root logger
        logger = logging.getLogger('')
        logger.setLevel(logging.WARNING)
        log_file = os.path.join(self.log_dir, 'root.log')
        h = logging.FileHandler(log_file)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        h.setFormatter(formatter)
        logger.addHandler(h)
        for name in ['document']:
            logger = logging.getLogger(name)
            logger.setLevel(logging.INFO)
            # Keep document records out of the root log.
            logger.propagate = False
            log_file = os.path.join(self.log_dir, name + '.log')
            h = logging.handlers.TimedRotatingFileHandler(log_file, rotating_interval)
            formatter = logging.Formatter("%(asctime)s - %(message)s")
            h.setFormatter(formatter)
            logger.addHandler(h)
class Doc_Logger(object):
    """Formats one-line log records describing a document export outcome."""
    def __init__(self, hostname, mimetypefilter):
        # The mime-type filter is queried for the document type of the
        # most recently checked resource.
        self.hostname = hostname
        self._doc_type_filter_cp = mimetypefilter
    def generator(self, flag, filepath, r):
        """Build a log record for resource ``r`` saved at ``filepath``."""
        doc_type = self._doc_type_filter_cp.doctype
        return '%20s %10s %s %s %s %s' % (
            flag, doc_type, r.crawl_date, r.url, self.hostname, filepath)
startup()
|
utils/__init__.py
|
intelligentmachines/Handwriting-synthesis
| 161 |
144215
|
<filename>utils/__init__.py
import numpy
import matplotlib
matplotlib.use("AGG")
from matplotlib import pyplot
def plot_stroke(stroke, save_name=None):
    """Plot one handwriting stroke sequence and show or save the figure.

    Args:
        stroke: array-like of shape (T, 3). Column 0 is the pen-lift flag
            (1 ends a stroke segment); columns 1-2 are x/y offsets that are
            cumulatively summed into absolute coordinates -- assumed from
            the indexing below, confirm against the dataset format.
        save_name: output image path; when None the figure is shown
            interactively instead.
    """
    # Plot a single example.
    f, ax = pyplot.subplots()
    # Offsets -> absolute pen positions.
    x = numpy.cumsum(stroke[:, 1])
    y = numpy.cumsum(stroke[:, 2])
    size_x = x.max() - x.min() + 1.0
    size_y = y.max() - y.min() + 1.0
    # Scale the figure to the stroke's aspect ratio (height fixed at 5").
    f.set_size_inches(5.0 * size_x / size_y, 5.0)
    # Pen lifts split the trajectory into separately drawn segments.
    cuts = numpy.where(stroke[:, 0] == 1)[0]
    start = 0
    for cut_value in cuts:
        ax.plot(x[start:cut_value], y[start:cut_value], "k-", linewidth=3)
        start = cut_value + 1
    ax.axis("off")  # equal
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    if save_name is None:
        pyplot.show()
    else:
        try:
            pyplot.savefig(save_name, bbox_inches="tight", pad_inches=0.5)
        except Exception:
            # Best-effort save: report and continue rather than abort.
            print("Error building image!: " + save_name)
    pyplot.close()
|
tests/test_installer/test_resolve.py
|
sdispater/poet
| 367 |
144233
|
<reponame>sdispater/poet<filename>tests/test_installer/test_resolve.py
# -*- coding: utf-8 -*-
from pip.req.req_install import InstallRequirement
from poet.installer import Installer
from poet.repositories import PyPiRepository
from poet.package.pip_dependency import PipDependency
# Pinned requirements and their expected sha256 hashes, shared by every
# resolver test in this module.
pendulum_req = InstallRequirement.from_line('pendulum==1.2.0')
pytzdata_req = InstallRequirement.from_line('pytzdata==2017.2')
requests_req = InstallRequirement.from_line('requests==2.13.0')
pendulum_hashes = [
    'sha256:a97e3ed9557ac0c5c3742f21fa4d852d7a050dd9b1b517e993aebef2dd2eea52',
    'sha256:641140a05f959b37a177866e263f6f53a53b711fae6355336ee832ec1a59da8a'
]
pytzdata_hashes = [
    'sha256:a4d11b8123d00e947fac88508292b9e148da884fc64b884d9da3897a35fa2ab0',
    'sha256:ec36940a8eec0a2ebc66a257a746428f7b4acce24cc000b3cda4805f259a8cd2'
]
requests_hashes = [
    'sha256:66f332ae62593b874a648b10a8cb106bfdacd2c6288ed7dec3713c3a808a6017',
    'sha256:b70696ebd1a5e6b627e7e3ac1365a4bc60aaf3495e843c1e70448966c5224cab'
]
def test_resolve(mocker, command):
    """Plain version constraints resolve with wildcard python markers."""
    resolve = mocker.patch('piptools.resolver.Resolver.resolve')
    reverse_dependencies = mocker.patch('piptools.resolver.Resolver.reverse_dependencies')
    resolve_hashes = mocker.patch('piptools.resolver.Resolver.resolve_hashes')
    resolve.return_value = [
        pendulum_req,
        pytzdata_req,
        requests_req,
    ]
    # pytzdata is pulled in only by pendulum.
    reverse_dependencies.return_value = {
        'pytzdata': set(['pendulum'])
    }
    resolve_hashes.return_value = {
        pendulum_req: set(pendulum_hashes),
        requests_req: set(requests_hashes),
        pytzdata_req: set(pytzdata_hashes),
    }
    installer = Installer(command, PyPiRepository())
    packages = installer._resolve([
        PipDependency('pendulum', '^1.2'),
        PipDependency('requests', '^2.13')
    ])
    pendulum = packages[0]
    pytzdata = packages[1]
    requests = packages[2]
    # Name
    assert 'pendulum' == pendulum['name']
    assert 'pytzdata' == pytzdata['name']
    assert 'requests' == requests['name']
    # Version
    assert '1.2.0' == pendulum['version']
    assert '2017.2' == pytzdata['version']
    assert '2.13.0' == requests['version']
    # Checksums
    assert set(pendulum_hashes) == set(pendulum['checksum'])
    assert set(pytzdata_hashes) == set(pytzdata['checksum'])
    assert set(requests_hashes) == set(requests['checksum'])
    # Category
    assert 'main' == pendulum['category']
    assert 'main' == pytzdata['category']
    assert 'main' == requests['category']
    # Optional
    assert not pendulum['optional']
    assert not pytzdata['optional']
    assert not requests['optional']
    # Python
    assert ['*'] == pendulum['python']
    assert ['*'] == pytzdata['python']
    assert ['*'] == requests['python']
def test_resolve_specific_python(mocker, command):
    """A python marker on a leaf dependency stays on that dependency only."""
    resolve = mocker.patch('piptools.resolver.Resolver.resolve')
    reverse_dependencies = mocker.patch('piptools.resolver.Resolver.reverse_dependencies')
    resolve_hashes = mocker.patch('piptools.resolver.Resolver.resolve_hashes')
    resolve.return_value = [
        pendulum_req,
        pytzdata_req,
        requests_req,
    ]
    reverse_dependencies.return_value = {
        'pytzdata': set(['pendulum'])
    }
    resolve_hashes.return_value = {
        pendulum_req: set(pendulum_hashes),
        requests_req: set(requests_hashes),
        pytzdata_req: set(pytzdata_hashes),
    }
    installer = Installer(command, PyPiRepository())
    packages = installer._resolve([
        PipDependency('pendulum', '^1.2'),
        PipDependency('requests', {'version': '^2.13', 'python': '~2.7'})
    ])
    pendulum = packages[0]
    pytzdata = packages[1]
    requests = packages[2]
    # Name
    assert 'pendulum' == pendulum['name']
    assert 'pytzdata' == pytzdata['name']
    assert 'requests' == requests['name']
    # Version
    assert '1.2.0' == pendulum['version']
    assert '2017.2' == pytzdata['version']
    assert '2.13.0' == requests['version']
    # Checksums
    assert set(pendulum_hashes) == set(pendulum['checksum'])
    assert set(pytzdata_hashes) == set(pytzdata['checksum'])
    assert set(requests_hashes) == set(requests['checksum'])
    # Category
    assert 'main' == pendulum['category']
    assert 'main' == pytzdata['category']
    assert 'main' == requests['category']
    # Optional
    assert not pendulum['optional']
    assert not pytzdata['optional']
    assert not requests['optional']
    # Python
    assert ['*'] == pendulum['python']
    assert ['*'] == pytzdata['python']
    assert ['~2.7'] == requests['python']
def test_resolve_specific_python_parent(mocker, command):
    """A parent's python marker propagates to its transitive dependency."""
    resolve = mocker.patch('piptools.resolver.Resolver.resolve')
    reverse_dependencies = mocker.patch('piptools.resolver.Resolver.reverse_dependencies')
    resolve_hashes = mocker.patch('piptools.resolver.Resolver.resolve_hashes')
    resolve.return_value = [
        pendulum_req,
        pytzdata_req,
        requests_req,
    ]
    reverse_dependencies.return_value = {
        'pytzdata': set(['pendulum'])
    }
    resolve_hashes.return_value = {
        pendulum_req: set(pendulum_hashes),
        requests_req: set(requests_hashes),
        pytzdata_req: set(pytzdata_hashes),
    }
    installer = Installer(command, PyPiRepository())
    packages = installer._resolve([
        PipDependency('pendulum', {'version': '^1.2', 'python': '~2.7'}),
        PipDependency('requests', '^2.13')
    ])
    pendulum = packages[0]
    pytzdata = packages[1]
    requests = packages[2]
    # Name
    assert 'pendulum' == pendulum['name']
    assert 'pytzdata' == pytzdata['name']
    assert 'requests' == requests['name']
    # Version
    assert '1.2.0' == pendulum['version']
    assert '2017.2' == pytzdata['version']
    assert '2.13.0' == requests['version']
    # Checksums
    assert set(pendulum_hashes) == set(pendulum['checksum'])
    assert set(pytzdata_hashes) == set(pytzdata['checksum'])
    assert set(requests_hashes) == set(requests['checksum'])
    # Category
    assert 'main' == pendulum['category']
    assert 'main' == pytzdata['category']
    assert 'main' == requests['category']
    # Optional
    assert not pendulum['optional']
    assert not pytzdata['optional']
    assert not requests['optional']
    # Python
    assert ['~2.7'] == pendulum['python']
    assert ['~2.7'] == pytzdata['python']
    assert ['*'] == requests['python']
def test_resolve_specific_python_and_wildcard_multiple_parent(mocker, command):
    """With one restricted and one wildcard parent, the shared child keeps
    the wildcard marker."""
    resolve = mocker.patch('piptools.resolver.Resolver.resolve')
    reverse_dependencies = mocker.patch('piptools.resolver.Resolver.reverse_dependencies')
    resolve_hashes = mocker.patch('piptools.resolver.Resolver.resolve_hashes')
    resolve.return_value = [
        pendulum_req,
        pytzdata_req,
        requests_req,
    ]
    # pytzdata is required by both pendulum (py ~2.7) and requests (any py).
    reverse_dependencies.return_value = {
        'pytzdata': set(['pendulum', 'requests'])
    }
    resolve_hashes.return_value = {
        pendulum_req: set(pendulum_hashes),
        requests_req: set(requests_hashes),
        pytzdata_req: set(pytzdata_hashes),
    }
    installer = Installer(command, PyPiRepository())
    packages = installer._resolve([
        PipDependency('pendulum', {'version': '^1.2', 'python': '~2.7'}),
        PipDependency('requests', '^2.13')
    ])
    pendulum = packages[0]
    pytzdata = packages[1]
    requests = packages[2]
    # Name
    assert 'pendulum' == pendulum['name']
    assert 'pytzdata' == pytzdata['name']
    assert 'requests' == requests['name']
    # Version
    assert '1.2.0' == pendulum['version']
    assert '2017.2' == pytzdata['version']
    assert '2.13.0' == requests['version']
    # Checksums
    assert set(pendulum_hashes) == set(pendulum['checksum'])
    assert set(pytzdata_hashes) == set(pytzdata['checksum'])
    assert set(requests_hashes) == set(requests['checksum'])
    # Category
    assert 'main' == pendulum['category']
    assert 'main' == pytzdata['category']
    assert 'main' == requests['category']
    # Optional
    assert not pendulum['optional']
    assert not pytzdata['optional']
    assert not requests['optional']
    # Python
    assert ['~2.7'] == pendulum['python']
    assert ['*'] == pytzdata['python']
    assert ['*'] == requests['python']
|
dawn/prepare_dawn_bs.py
|
McCrearyD/imagenet18
| 716 |
144268
|
<reponame>McCrearyD/imagenet18<filename>dawn/prepare_dawn_bs.py
#!/usr/bin/env python
#
# Prepares DAWN TSV file from TensorBoard events url
import sys, os, re
from dateutil import parser
events_url = 'https://s3.amazonaws.com/yaroslavvb/logs/release-sixteen.04.events'
import os
import glob
import numpy as np
import datetime as dt
import pytz
from tensorflow.python.summary import summary_iterator
import argparse
# Command-line options for this script.
# NOTE(review): this rebinding shadows the `parser` imported from dateutil
# above; the dateutil parser is never used after this point.
parser = argparse.ArgumentParser(description='launch')
parser.add_argument('--ignore-eval', action='store_true',
                    help='ignore eval time')
args = parser.parse_args()
def get_events(fname, x_axis='step'):
    """Returns event dictionary for given run, has form
    {tag1: {step1: val1}, tag2: ..}

    If x_axis is set to "time", step is replaced by timestamp.

    Args:
        fname: path to a TensorBoard events file.
        x_axis: 'step' to key values by event step, 'time' by wall time.
    """
    result = {}
    events = summary_iterator.summary_iterator(fname)
    try:
        for event in events:
            if x_axis == 'step':
                x_val = event.step
            elif x_axis == 'time':
                x_val = event.wall_time
            else:
                assert False, f"Unknown x_axis ({x_axis})"
            vals = {val.tag: val.simple_value for val in event.summary.value}
            # step_time: value
            for tag in vals:
                event_dict = result.setdefault(tag, {})
                if x_val in event_dict:
                    print(f"Warning, overwriting {tag} for {x_axis}={x_val}")
                    print(f"old val={event_dict[x_val]}")
                    print(f"new val={vals[tag]}")
                event_dict[x_val] = vals[tag]
    except Exception as e:
        # NOTE(review): deliberately best-effort -- a truncated events file
        # raises mid-iteration; return whatever was parsed so far.
        print(e)
        pass
    return result
def datetime_from_seconds(seconds, timezone="US/Pacific"):
    """Convert a POSIX timestamp to a timezone-aware datetime.

    timezone: pytz timezone name to use for conversion, ie, UTC or US/Pacific
    """
    return dt.datetime.fromtimestamp(seconds, pytz.timezone(timezone))
def download_file(url):
    """Download *url* and return the raw response body as bytes."""
    import urllib.request

    # Use a context manager so the HTTP connection is closed even if
    # read() raises (the original leaked the response object).
    with urllib.request.urlopen(url) as response:
        return response.read()
def main():
    """Download the events file and print one JSON line per logged step."""
    with open('/tmp/events', 'wb') as f:
        f.write(download_file(events_url))
    events_dict=get_events('/tmp/events', 'step')
    # build step->time dict for eval events
    lr = events_dict['sizes/batch']
    for step in lr:
        # 16*8 presumably scales per-GPU batch to the global batch
        # (16 machines x 8 GPUs) -- TODO confirm against the training setup.
        print('{"batch_size": '+str(16*8*lr[step])+', "example": '+str(step)+"},")
# Script entry point.
if __name__=='__main__':
    main()
|
zfit/core/pdf.py
|
nsahoo/zfit
| 129 |
144279
|
from __future__ import annotations
# Copyright (c) 2021 zfit
import typing
from collections.abc import Callable
from contextlib import suppress
import tensorflow_probability as tfp
import zfit_interface.typing as ztyping
from zfit_interface.pdf import ZfitPDF
from zfit_interface.variables import ZfitVar, ZfitSpace, ZfitParam
from zfit import convert_to_parameter, z
from zfit._variables.varsupport import VarSupports
from zfit.core.func import Func
from zfit.core.values import ValueHolder
from zfit.util.container import convert_to_container
from zfit.util.exception import (
SpecificFunctionNotImplemented,
NotExtendedPDFError, WorkInProgressError,
)
class Integration:
    """Holds analytic-integral registrations plus numeric-integration settings
    (MC sampler and number of draws) for a PDF."""

    # Class-level template registry; each instance works on its own copy.
    _analytic_integrals = {}

    def __init__(self, mc_sampler=None, draws_per_dim=None, numeric_integrator=None):
        """Create the integration config.

        Args:
            mc_sampler: callable producing sample points; defaults to a
                deterministic (randomized=False) Halton sequence from
                tensorflow_probability.
            draws_per_dim: number of MC draws per dimension (default 40_000).
            numeric_integrator: numeric integrator to use (default False; TODO).
        """
        # Copy so per-instance registrations don't mutate the class template.
        self._analytic_integrals = self._analytic_integrals.copy()
        if mc_sampler is None:
            mc_sampler = lambda *args, **kwargs: tfp.mcmc.sample_halton_sequence(
                *args, randomized=False, **kwargs
            )
        if numeric_integrator is None:
            numeric_integrator = False  # TODO
        if draws_per_dim is None:
            draws_per_dim = 40_000
        self.numeric_integrator = numeric_integrator
        self.mc_sampler = mc_sampler
        self.draws_per_dim = draws_per_dim

    def register_on_object(
        self, var: ztyping.Variable, func: Callable, overwrite: bool = False
    ):
        """Register *func* as the analytic integral over the variable set *var*.

        Raises:
            ValueError: if an integral for *var* exists and overwrite is False.
        """
        var = convert_to_container(var, frozenset)
        if var in self._analytic_integrals and not overwrite:
            raise ValueError(
                f"An analytic integral for {var} is already registered and 'overwrite' is "
                f"set to False."
            )
        self._analytic_integrals[var] = func

    def get_available(self, var):
        """Return registered integrals whose variable set contains *var*,
        ordered from the smallest (most specific) set to the largest."""
        var = convert_to_container(var, frozenset)
        candidates = sorted(
            (v for v in self._analytic_integrals if var.issubset(v)), key=len
        )
        return {v: self._analytic_integrals[v] for v in candidates}

    def has_full(self, var):
        """Whether an analytic integral fully covering *var* is available.

        BUG FIX: this was decorated with @property while taking a `var`
        argument, so merely accessing the attribute raised TypeError; it is
        now a regular method like ``has_partial``.
        """
        var = convert_to_container(var, frozenset)
        # Simplified from `len(list(... .keys()) + [[]][0])`: `[[]][0]` is
        # just an empty list, so the expression is the number of candidates.
        # NOTE(review): comparing the candidate *count* to len(var) looks
        # suspicious -- confirm the intended semantics.
        return len(self.get_available(var)) == len(var)

    def has_partial(self, var):
        """Whether any analytic integral covering at least *var* exists."""
        var = convert_to_container(var, frozenset)
        return bool(self.get_available(var))
class PDF(Func, ZfitPDF):
def __init__(
self,
obs: typing.Mapping[str, ZfitSpace] = None,
params: typing.Mapping[str, ZfitParam] = None,
var: typing.Mapping[str, ZfitVar] = None,
supports: typing.Mapping[str, typing.Mapping[str, VarSupports]] = None,
extended: bool = None,
norm: typing.Mapping[str, ZfitSpace] = None,
label: str | None = None,
):
self.supports = supports
if norm is None:
norm = obs.values() # TODO: preprocess
super().__init__(var=var, label=label)
if norm is None:
norm = self.space
self.norm = norm
if extended is not None:
self._set_yield(extended)
self.integration = Integration()
def _set_yield(self, value):
# if self.is_extended:
# raise AlreadyExtendedPDFError(f"Cannot extend {self}, is already extended.")
value = convert_to_parameter(value)
# self.add_cache_deps(value) # TODO
self._yield = value
@property
def is_extended(self) -> bool:
"""Flag to tell whether the model is extended or not.
Returns:
A boolean.
"""
return self._yield is not None
def __call__(self, var):
if self.is_extended:
return self.ext_pdf(var)
else:
return self.pdf(var)
def _pdf(self, var, norm):
raise SpecificFunctionNotImplemented
def pdf(
self,
var: ztyping.VarInputType,
norm: ztyping.NormInputType = None,
*,
options=None,
) -> ztyping.PDFReturnType:
"""Probability density function, normalized over `norm`.
Args:
var: `float` or `double` `Tensor`.
norm: :py:class:`~zfit.Space` to normalize over
Returns:
:py:class:`tf.Tensor` of type `self.dtype`.
"""
var = self._convert_check_input_var(var)
norm = self._convert_check_input_norm(norm, var=var)
if var.space is not None:
return self.integrate(limits=var, norm=norm, options=options)
value = self._call_pdf(var=var, norm=norm, options=options)
return value
# with self._convert_sort_x(var) as var:
# value = self._single_hook_pdf(x=var, norm_range=norm)
# if run.numeric_checks:
# z.check_numerics(value, message="Check if pdf output contains any NaNs of Infs")
# return z.to_real(value)
@z.function(wraps="model")
def _call_pdf(self, var, norm, *, options=None):
return self._pdf(var, norm) # TODO
def _ext_pdf(self, var, norm):
raise SpecificFunctionNotImplemented
def ext_pdf(
self,
var: ztyping.VarInputType,
norm: ztyping.NormInputType = None,
*,
options=None,
) -> ztyping.PDFReturnType:
"""Probability density function, normalized over `norm`.OneDim.
Args:
var: `float` or `double` `Tensor`.
norm: :py:class:`~zfit.Space` to normalize over
Returns:
:py:class:`tf.Tensor` of type `self.dtype`.
"""
if not self.is_extended:
raise NotExtendedPDFError
var = self._convert_check_input_var(var)
norm = self._convert_check_input_norm(norm, var=var)
if var.space is not None:
return self.integrate(limits=var, norm=norm, options=options)
return self._call_ext_pdf(var=var, norm=norm, options=options)
@z.function(wraps="model")
def _call_ext_pdf(self, var, norm, *, options=None):
return self._ext_pdf(var, norm) # TODO
def _integrate(self, var, norm, options):
raise SpecificFunctionNotImplemented
def integrate(self, limits, norm=None, *, var=None, options=None):
var = self._convert_check_input_var(limits, var)
if var.space is None:
raise ValueError(
f"No space is given to integrate of {self}, needs at least one."
)
norm = self._convert_check_input_norm(norm, var=var)
return self._call_integrate(var=var, norm=norm, options=options)
    @z.function(wraps="model")
    def _call_integrate(self, var, norm, options):
        """Try the (auto) integral; for extended pdfs fall back to ext_integral / yield."""
        with suppress(SpecificFunctionNotImplemented):
            return self._auto_integrate(var, norm, options=options)
        if self.is_extended:
            # Recover the normalized integral from the extended one.
            return (
                self._auto_ext_integrate(var, norm, options=options) / self.get_yield()
            )
        return self._fallback_integrate(var, norm, options=options)
    def _auto_integrate(self, var, norm, options):
        """Use the subclass integral if implemented, otherwise the generic fallback."""
        with suppress(SpecificFunctionNotImplemented):
            return self._integrate(var, norm, options=options)
        return self._fallback_integrate(var=var, norm=norm, options=options)
    def _fallback_integrate(self, var, norm, options):
        # NOTE(review): stub that silently returns None; the sibling fallback
        # `_fallback_rel_counts` raises WorkInProgressError instead -- confirm intent.
        pass
    def _ext_integrate(self, var, norm, options):
        """Subclass hook: extended (yield-scaled) integral; base raises to trigger fallbacks."""
        raise SpecificFunctionNotImplemented
    def _values(self, var=None, options=None):
        """Histogram values: relative counts when extended, absolute counts otherwise."""
        # NOTE(review): mapping looks inverted -- extended pdfs usually provide absolute
        # counts and non-extended ones relative counts. Confirm before relying on it.
        if self.is_extended:
            return self.rel_counts(var=var, options=options)
        else:
            return self.counts(var=var, options=options)
    def counts(self, *, var=None, norm=None, options=None):
        """Public entry point for absolute bin counts; delegates to ``_call_counts``."""
        return self._call_counts(var=var, norm=norm, options=options)
    def _call_counts(self, var=None, norm=None, options=None):
        """Use the subclass ``_counts`` if implemented, otherwise fall back to ext_pdf."""
        with suppress(SpecificFunctionNotImplemented):
            return self._counts(var, norm, options=options)  # TODO: auto_value?
        # NOTE(review): the fallback returns an extended *density*, not counts --
        # confirm a bin-area scaling is handled elsewhere.
        return self._call_ext_pdf(var=var, norm=norm, options=options)
    def _counts(self, var=None, norm=None, options=None):
        """Subclass hook: absolute bin counts; base raises to trigger fallbacks."""
        raise SpecificFunctionNotImplemented
    def rel_counts(self, *, var=None, norm=None, options=None):
        """Public entry point for relative bin counts; delegates to ``_call_rel_counts``."""
        return self._call_rel_counts(var=var, norm=norm, options=options)
    def _call_rel_counts(self, var=None, norm=None, options=None):
        """Use the subclass ``_rel_counts`` if implemented, otherwise the fallback."""
        with suppress(SpecificFunctionNotImplemented):
            return self._rel_counts(var, norm, options=options)  # TODO: auto_value?
        return self._fallback_rel_counts(var=var, norm=norm, options=options)
    def _rel_counts(self, var=None, norm=None, options=None):
        """Subclass hook: relative bin counts; base raises to trigger fallbacks."""
        raise SpecificFunctionNotImplemented
    def _fallback_rel_counts(self, var, norm, options):
        # No generic way to compute relative counts yet; explicitly work-in-progress.
        raise WorkInProgressError
    def ext_integrate(self, limits, norm=None, *, var=None, options=None):
        """Integrate the extended (yield-scaled) pdf over `limits`.

        Args:
            limits: Limits/space to integrate over; must define at least one space.
            norm: Normalization space; defaults to the pdf's own norm when None.
            var: Optional additional variable input forwarded to the conversion.
            options: Computation options forwarded downstream.

        Raises:
            NotExtendedPDFError: if this pdf is not extended.
            ValueError: if the converted input defines no space to integrate over.
        """
        if not self.is_extended:
            raise NotExtendedPDFError
        # Two positional inputs are forwarded; `_convert_check_input_var` must accept both.
        var = self._convert_check_input_var(limits, var)
        if var.space is None:
            raise ValueError(
                f"No space is given to integrate of {self}, needs at least one."
            )
        norm = self._convert_check_input_norm(norm, var=var)
        return self._call_ext_integrate(var=var, norm=norm, options=options)
    @z.function(wraps="model")
    def _call_ext_integrate(self, var, norm, options):
        """Try the (auto) extended integral; fall back to integral * yield if extended."""
        with suppress(SpecificFunctionNotImplemented):
            return self._auto_ext_integrate(var, norm, options=options)
        if self.is_extended:
            # Scale the normalized integral up to the extended one.
            return self._auto_integrate(var, norm, options=options) * self.get_yield()
        return self._fallback_ext_integrate(var, norm, options=options)
    def _auto_ext_integrate(self, var, norm, options):
        """Delegate to the subclass extended integral (callers suppress the raise)."""
        return self._ext_integrate(var, norm, options=options)
    def _fallback_ext_integrate(self, var, norm, options):
        # NOTE(review): stub that silently returns None; numeric integration hookup pending.
        pass  # TODO
        # return self.integration.mixed(var, norm, options)
def _convert_check_input_var(self, var):
var = ValueHolder(var)
return var # TODO
def _convert_check_input_norm(self, norm, var):
if norm is None:
norm = self.norm
# return var # TODO
class UnbinnedPDF(PDF):
    """PDF evaluated on unbinned (event-wise) data.

    Builds the ``supports`` declaration for the default method ('ext_pdf' when
    extended, 'pdf' otherwise) out of the observables (data-like), the parameters
    (scalar) and any extra ``var`` entries -- unless the caller already provided
    an explicit declaration for that method.
    """

    def __init__(self, obs, params=None, var=None, supports=None, extended=None, norm=None):
        supports_default = 'ext_pdf' if extended else 'pdf'
        if supports is None:
            supports = {}
        if obs is None:
            obs_supports = {}
        else:
            # Observables enter as data-like supports; pre-built VarSupports are skipped.
            obs_supports = {
                axis: VarSupports(var=ob.name, data=True)
                for axis, ob in obs.items()
                if not isinstance(ob, VarSupports)
            }
        if params is None:
            params_supports = {}
        else:
            params_supports = {
                axis: VarSupports(var=p.name, scalar=True) for axis, p in params.items()
            }
        if var is None:
            var_supports = {}
        else:
            var_supports = var.copy()
        var_supports.update(obs_supports)
        var_supports.update(params_supports)
        # Only fill in the derived supports when the caller did not provide one.
        # (Previously an empty-dict placeholder was inserted first, which made this
        # check dead code and silently discarded ``var_supports``.)
        if supports_default not in supports:
            supports[supports_default] = var_supports
        super().__init__(obs=obs, params=params, var=var, supports=supports, extended=extended, norm=norm)
class HistPDF(PDF):
    """PDF evaluated on binned (histogram) data.

    The default implemented method is 'counts' (extended) or 'rel_counts'
    (non-extended); its supports declaration is derived from the observables
    (binned), the parameters (scalar) and any extra ``var`` entries. The 'pdf'
    and 'ext_pdf' supports are derived from it (full) unless given explicitly.
    Densities are obtained by dividing the (relative) counts by the bin areas.
    """

    def __init__(
        self,
        obs: typing.Mapping[str, ZfitSpace] = None,
        params: typing.Mapping[str, ZfitParam] = None,
        var: typing.Mapping[str, ZfitVar] = None,
        supports: typing.Mapping[str, typing.Mapping[str, VarSupports]] = None,
        extended: bool = None,
        norm: typing.Mapping[str, ZfitSpace] = None,
        label: str | None = None,
    ):
        supports_default = 'counts' if extended else 'rel_counts'
        if supports is None:
            supports = {}
        # (Removed a dead `supports[supports_default] = {}` placeholder that was
        # always overwritten below.)
        if obs is None:
            obs_supports = {}
        else:
            # Observables enter as binned supports; pre-built VarSupports are skipped.
            obs_supports = {
                axis: VarSupports(var=ob.name, binned=True)
                for axis, ob in obs.items()
                if not isinstance(ob, VarSupports)
            }
        if params is None:
            params_supports = {}
        else:
            params_supports = {
                axis: VarSupports(var=p.name, scalar=True) for axis, p in params.items()
            }
        if var is None:
            var_supports = {}
        else:
            var_supports = var.copy()
        var_supports.update(obs_supports)
        var_supports.update(params_supports)
        # NOTE(review): this overwrites any caller-provided supports[supports_default];
        # confirm whether explicit declarations for the default method should win.
        supports[supports_default] = var_supports
        if 'pdf' not in supports:
            supports['pdf'] = {axis: VarSupports(var=v.var, full=True)
                               for axis, v in supports[supports_default].items()}
        if 'ext_pdf' not in supports:
            supports['ext_pdf'] = {axis: VarSupports(var=v.var, full=True)
                                   for axis, v in supports[supports_default].items()}
        super().__init__(
            obs=obs, params=params, var=var, extended=extended, norm=norm, label=label, supports=supports,
        )

    def _ext_pdf(self, var, norm):  # TODO: normalization?
        """Extended density: absolute counts divided by the bin areas."""
        counts = self._call_counts(var=var, norm=norm)
        binareas = var.binned.binning.areas
        densities = counts / binareas
        return densities

    def _pdf(self, var, norm):  # TODO: normalization?
        """Density: relative counts divided by the bin areas."""
        counts = self._call_rel_counts(var=var, norm=norm)
        binareas = var.binned.binning.areas
        densities = counts / binareas
        return densities
|
impy/ObjectDetectionDataset.py
|
tispratik/impy
| 118 |
144286
|
<reponame>tispratik/impy
"""
Author: <NAME>: <EMAIL>
Description: A class that loads an object-detection dataset and performs
useful operations with it.
"""
import os
import json
import math
import numpy as np
# from interface import implements
from tqdm import tqdm
try:
from .ObjectDetectionDatasetPreprocessMethods import *
except:
from ObjectDetectionDatasetPreprocessMethods import *
try:
from .ObjectDetectionDatasetStatisticsMethods import *
except:
from ObjectDetectionDatasetStatisticsMethods import *
try:
from .ImagePreprocess import *
except:
from ImagePreprocess import *
try:
from .ImageAnnotation import *
except:
from ImageAnnotation import *
try:
from .VectorOperations import *
except:
from VectorOperations import *
try:
from .Util import *
except:
from Util import *
try:
from .AssertDataTypes import *
except:
from AssertDataTypes import *
try:
from .AugmentationConfigurationFile import *
except:
from AugmentationConfigurationFile import *
try:
from .ApplyAugmentation import applyBoundingBoxAugmentation, applyColorAugmentation
except:
from ApplyAugmentation import applyBoundingBoxAugmentation, applyColorAugmentation
prep = ImagePreprocess()
dataAssertion = AssertDataTypes()
# class ObjectDetectionDataset(implements(ObjectDetectionDatasetPreprocessMethods, \
# ObjectDetectionDatasetStatisticsMethods)):
class ObjectDetectionDataset(object):
def __init__(self, imagesDirectory = None, annotationsDirectory = None, databaseName = None):
"""
A high level data structure used for image localization datasets.
Args:
imagesDirectory = None,
annotationsDirectory = None,
databaseName = None
Returns:
None
"""
super(ObjectDetectionDataset, self).__init__()
# Assert images and annotations
if (not os.path.isdir(imagesDirectory)):
raise Exception("Path to images does not exist.")
if (not os.path.isdir(annotationsDirectory)):
raise Exception("Path to annotations does not exist.")
if (databaseName == None):
databaseName = "Unspecified"
# Class variables
self.imagesDirectory = imagesDirectory
self.annotationsDirectory = annotationsDirectory
self.databaseName = databaseName
# Preprocessing.
def dataConsistency(self):
"""
Checks whether data is consistent. It analyses if there is the same amount of
of images and annotations. Then it reviews if the annotation and image names
are consistent with each other.
Args:
None
Returns:
None
Raises:
- Exception: when the extension of the image is not allowed. Only jpgs and pngs are allowed.
- Exception: When an annotation file does not have a .xml extension.
- Exception: When the amount of annotations and images is not equal.
- Exception: When there are images that don't have annotations.
- Exception: When there are annotations that don't have images.
"""
# Local variables.
images = []
annotations = []
# Preprocess images.
for image in tqdm(os.listdir(self.imagesDirectory)):
# Extract name.
extension = Util.detect_file_extension(filename = image)
if (extension == None):
raise Exception("Your image extension is not valid: {}".format(extension) +\
" Only jpgs and pngs are allowed.")
images.append(image.split(extension)[0])
# Preprocess annotations.
for annotation in tqdm(os.listdir(self.annotationsDirectory)):
if (not annotation.endswith(".xml")):
raise Exception("Only xml annotations are allowed: {}".format(annotation))
annotations.append(annotation.split(".xml")[0])
# Convert lists to sets.
imagesSet = set(images)
annotationsSet = set(annotations)
# Check name consistency.
imgToAnnt = imagesSet.difference(annotationsSet)
anntToImg = annotationsSet.difference(imagesSet)
# Check size consistency.
if (len(imagesSet) != len(annotationsSet)):
print("Images to annotations: ", imgToAnnt)
print("Annotations to images: ", anntToImg)
raise Exception("The amount of images({}) and annotations({}) is not equal."\
.format(len(imagesSet), len(annotationsSet)))
if (len(imgToAnnt) != 0):
raise Exception("There are more images than annotations: {}".format(imgToAnnt))
if (len(anntToImg) != 0):
raise Exception("There are more annotations than images: {}".format(anntToImg))
def findEmptyOrWrongAnnotations(self, removeEmpty = None):
"""
Find empty or irregular annotations in the annotation files. An empty
annotation is an annotation that includes no objects. And a irregular
annotation is an annotation that has a bounding box with coordinates that
are off the image's boundaries.
Args:
removeEmpty: A boolean that if True removes the annotation and image that are empty.
Returns:
None
Raises:
- Exception: when the extension of the image is not allowed. Only jpgs and pngs are allowed.
- Exception: when an annotation file is empty.
- Exception: when a coordinate is not valid. Either less than zero or greater than image's size.
"""
# Assertions
if (removeEmpty == None):
removeEmpty = False
# Local variables
emptyAnnotations = []
files = os.listdir(self.imagesDirectory)
# Logic
for file in tqdm(files):
# In case a folder is found, report it.
if (os.path.isdir(file)):
continue
# Otherwise, continue.
extension = Util.detect_file_extension(filename = file)
if (extension == None):
raise Exception("ERROR: Your image extension is not valid: {}".format(extension) +\
" Only jpgs and pngs are allowed.")
# Extract name
filename = os.path.split(file)[1].split(extension)[0]
# Create xml and img name
imgFullPath = os.path.join(self.imagesDirectory, filename + extension)
xmlFullPath = os.path.join(self.annotationsDirectory, filename + ".xml")
# Create an object of ImageAnnotation.
annt = ImageAnnotation(path = xmlFullPath)
# Check if it is empty.
if (len(annt.propertyBoundingBoxes) == 0):
emptyAnnotations.append(file)
print("WARNING: Annotation {} does not have any annotations.".format(xmlFullPath))
# Check if we need to remove this annotation.
if (removeEmpty == True):
#os.remove(imgFullPath)
os.remove(xmlFullPath)
# Check if it is irregular
height, width, depth = annt.propertySize
for each in annt.propertyBoundingBoxes:
ix, iy, x, y = each
if (ix < 0):
raise ValueError("ERROR: Negative coordinate found in {}".format(file))
if (iy < 0):
raise ValueError("ERROR: Negative coordinate found in {}".format(file))
if (x > width):
raise ValueError("ERROR: Coordinate {} bigger than width {} found in {}"\
.format(x, width, file))
if (y > height):
raise ValueError("ERROR: Coordinate {} bigger than height {} found in {}"\
.format(y, height, file))
# Return empty annotations
return emptyAnnotations
# Stats.
def computeBoundingBoxStats(self, saveDataFrame = None, outputDirDataFrame = None):
"""
Compute basic stats for the dataset's bounding boxes.
Args:
saveDataFrame: A boolean that defines whether to save the dataframe or not.
outputDirDataFrame: A string that contains the path where the dataframe will
be saved.
Returns:
None
"""
# Assertions
if (saveDataFrame == None):
saveDataFrame = False
else:
if (type(saveDataFrame) == bool):
if (outputDirDataFrame == None):
raise ValueError("Parameter directory dataframe cannot be empty.")
else:
raise TypeError("saveDataFrame must be of type bool.")
# Local variables
namesFrequency = {}
files = os.listdir(self.imagesDirectory)
columns = ["path", "name", "width", "height", "xmin", "ymin", "xmax", "ymax"]
paths = []
names = []
widths = []
heights = []
boundingBoxesLists = []
# Logic
for file in tqdm(files):
extension = Util.detect_file_extension(filename = file)
if (extension == None):
raise Exception("ERROR: Your image extension is not valid: {}".format(extension) +\
" Only jpgs and pngs are allowed.")
# Extract name.
filename = os.path.split(file)[1].split(extension)[0]
# Create xml and img name.
imgFullPath = os.path.join(self.imagesDirectory, filename + extension)
xmlFullPath = os.path.join(self.annotationsDirectory, filename + ".xml")
# Create an object of ImageAnnotation.
annt = ImageAnnotation(path = xmlFullPath)
# Check if it is empty.
boundingBoxes = annt.propertyBoundingBoxes
names = annt.propertyNames
height, width, depth = annt.propertySize
for i in range(len(names)):
if (not (names[i] in namesFrequency)):
namesFrequency[names[i]] = 0
else:
namesFrequency[names[i]] += 1
paths.append(file)
names.append(names[i])
widths.append(width)
heights.append(height)
boundingBoxesLists.append(boundingBoxes[i])
# Print stats.
print("Total number of bounding boxes: {}"\
.format(sum([i for i in namesFrequency.values()])))
print("Unique classes: {}".format(namesFrequency))
# Save data?
if (saveDataFrame):
Util.save_lists_in_dataframe(columns = columns,
data = [paths, names, widths, heights, boundingBoxesLists],
output_directory = outputDirDataFrame)
# Save bounding boxes as files.
def saveBoundingBoxes(self, outputDirectory = None, filterClasses = None):
"""
Saves the bounding boxes as images of each image in the dataset.
Args:
outputDirectory: A string that contains the directory where the images will be saved.
filterClasses: A list of Strings that contains names of the classes to be filtered and saved.
Returns:
None
"""
# Assertions
if (outputDirectory == None):
raise ValueError("outputDirectory cannot be empty")
if (type(outputDirectory) != str):
raise TyperError("outputDirectory must be a string.")
if (not (os.path.isdir(outputDirectory))):
raise FileNotFoundError("outputDirectory's path does not exist: ".format(outputDirectory))
if (filterClasses == None):
filterClasses = []
if (type(filterClasses) != list):
raise TyperError("filterClasses must be of type list.")
# Local variables
images = [os.path.join(self.imagesDirectory, i) for i in os.listdir(self.imagesDirectory)]
# Logic
for img in tqdm(images):
# Get extension
extension = Util.detect_file_extension(filename = img)
if (extension == None):
raise Exception("ERROR: Your image extension is not valid." +\
"Only jpgs and pngs are allowed.")
# Extract name
filename = os.path.split(img)[1].split(extension)[0]
# Create xml and img name
imgFullPath = os.path.join(self.imagesDirectory, filename + extension)
xmlFullPath = os.path.join(self.annotationsDirectory, filename + ".xml")
# Load annotation.
annt = ImageAnnotation(path = xmlFullPath)
# Get bounding boxes.
boundingBoxes = annt.propertyBoundingBoxes
names = annt.propertyNames
# Save image.
frame = cv2.imread(img)
# Save bounding boxes as png images.
for name, boundingBox in zip(names, boundingBoxes):
if ((len(filterClasses) == 0) or (name in filterClasses)):
ix, iy, x, y = boundingBox
# Detect extension.
extension = Util.detect_file_extension(filename = img)
if (extension == None):
raise Exception("Your image extension is not valid. " +\
"Only jpgs and pngs are allowed. {}".format(extension))
# Generate a new name.
newName = Util.create_random_name(name = self.databaseName, length = 4)
imgName = newName + extension
# Check bounding box does not get out of boundaries.
if (x == frame.shape[1]):
x -= 1
if (y == frame.shape[0]):
y -= 1
# Check bounding boxes are ok.
if (((y-iy) == 0) or ((x - ix) == 0) or \
((ix < 0) or (iy < 0)) or \
((x > frame.shape[1]) or (y > frame.shape[0]))):
print(img)
print(ix, iy, x, y)
raise Exception("Bounding box does not exist.")
# Save image.
Util.save_img(frame = frame[iy:y, ix:x, :],
img_name = imgName,
output_image_directory = outputDirectory)
# Reduce and data augmentation.
	def reduceDatasetByRois(self, offset = None, outputImageDirectory = None, outputAnnotationDirectory = None):
		"""
		Reduce that images of a dataset by grouping its bounding box annotations and
		creating smaller images that contain them.
		Args:
			offset: An int that contains the amount of pixels in which annotations
							can be grouped.
			outputImageDirectory: A string that contains the path to the directory
														where the images will be stored. When None, an
														"images" folder is created inside the cwd and used.
			outputAnnotationDirectory: A string that contains the path to the directory
																where the annotations will be stored. When None,
																"annotations/xmls" is created inside the cwd and used.
		Returns:
			None
		Raises:
			- ValueError: when offset is None.
			- Exception: when an output directory does not exist or an image extension is invalid.
		"""
		# Assertions
		if (offset == None):
			raise ValueError("Offset parameter cannot be empty.")
		if (outputImageDirectory == None):
			outputImageDirectory = os.getcwd()
			Util.create_folder(os.path.join(outputImageDirectory, "images"))
			outputImageDirectory = os.path.join(os.getcwd(), "images")
		if (not (os.path.isdir(outputImageDirectory))):
			raise Exception("Path to output directory does not exist. {}"\
											.format(outputImageDirectory))
		if (outputAnnotationDirectory == None):
			outputAnnotationDirectory = os.getcwd()
			Util.create_folder(os.path.join(outputAnnotationDirectory, "annotations"))
			Util.create_folder(os.path.join(outputAnnotationDirectory, "annotations", "xmls"))
			outputAnnotationDirectory = os.path.join(os.getcwd(), "annotations", "xmls")
		if (not (os.path.isdir(outputAnnotationDirectory))):
			raise Exception("Path to output annotation directory does not exist. {}"\
											.format(outputAnnotationDirectory))
		# Get images and annotations full paths
		imagesPath = [os.path.join(self.imagesDirectory, each) for each in \
									os.listdir(self.imagesDirectory)]
		# Delegate the per-image ROI grouping to reduceImageDataPointByRoi.
		for img in tqdm(imagesPath):
			# Get extension
			extension = Util.detect_file_extension(filename = img)
			if (extension == None):
				raise Exception("Your image extension is not valid." +\
												"Only jpgs and pngs are allowed.")
			# Extract name
			filename = os.path.split(img)[1].split(extension)[0]
			# Create xml and img name
			imgFullPath = os.path.join(self.imagesDirectory, filename + extension)
			xmlFullPath = os.path.join(self.annotationsDirectory, filename + ".xml")
			self.reduceImageDataPointByRoi(imagePath = imgFullPath,
											annotationPath = xmlFullPath,
											offset = offset,
											outputImageDirectory = outputImageDirectory,
											outputAnnotationDirectory = outputAnnotationDirectory)
	def reduceImageDataPointByRoi(self, imagePath = None, annotationPath = None, offset = None, outputImageDirectory = None, outputAnnotationDirectory = None):
		"""
		Group an image's bounding boxes into Rois and create smaller images.
		Args:
			imagePath: A string that contains the path to an image.
			annotationPath: A string that contains the path to an annotation.
			offset: An int that contains the offset (size of the allowed ROI).
			outputImageDirectory: A string that contains the path where the images
														will be stored.
			outputAnnotationDirectory: A string that contains the path where the annotations
																will be stored.
		Returns:
			None
		Example:
			Given an image and its bounding boxes, create ROIs of size offset
			that enclose the maximum possible amount of bounding boxes.
				---------------------------------     --------------------------------
				|                               |     | Roi0------                   |
				|     ---                       |     |    |     |                   |
				|     | |                       |     |    |---  |                   |
				|     ---                       |     |    ||  | |                   |
				|            ---                |     |    ||--| |                   |
				|            | |                | ->  |    | --- |                   |
				|            ---                |     |    | | | |                   |
				|                               |     |    | --- |                   |
				|                               |     |    ------Roi0                |
				|                               |     |                              |
				|                 ---           |     |            Roi1----          |
				|                 | |           |     |               |   |          |
				|                 ---           |     |               |---|          |
				|                               |     |               || ||          |
				|                               |     |               |---|          |
				|                               |     |               ----Roi1       |
				---------------------------------     ---------------------------------
			Then, the rois are saved with their respective annotations.
		"""
		# Assertions
		if (imagePath == None):
			raise ValueError("ERROR: Path to imagePath parameter cannot be empty.")
		if (annotationPath == None):
			raise ValueError("ERROR: Path to annotation parameter cannot be empty.")
		if (not os.path.isfile(imagePath)):
			raise ValueError("ERROR: Path to image does not exist {}.".format(imagePath))
		if (not os.path.isfile(annotationPath)):
			raise ValueError("ERROR: Path to annotation does not exist {}.".format(annotationPath))
		if (offset == None):
			raise ValueError("ERROR: Offset parameter cannot be empty.")
		if (not (os.path.isdir(outputImageDirectory))):
			raise ValueError("ERROR: Output image directory does not exist.")
		if (not (os.path.isdir(outputAnnotationDirectory))):
			raise ValueError("ERROR: Output annotation directory does not exist.")
		# Load image annotation.
		annotation = ImageAnnotation(path = annotationPath)
		height, width, depth = annotation.propertySize
		names = annotation.propertyNames
		objects = annotation.propertyObjects
		boundingBoxes = annotation.propertyBoundingBoxes
		# Create a list of Annotation wrappers; every box starts as a core point.
		annotations = []
		index = 0
		for boundingBox, name in zip(boundingBoxes, names):
			# Compute the module (distance of the box's top-left corner to the origin).
			ix, iy, x, y = boundingBox
			module = VectorOperations.compute_module(vector = [ix, iy])
			annotations.append(Annotation(name = name, bndbox = boundingBox, \
											module = module, corePoint = True))
			index += 1
		# Sort the list of Annotations by its module from lowest to highest.
		# NOTE(review): hand-rolled bubble sort; `>=` also swaps equal modules,
		# so the order of equal-module boxes is reversed (harmless here).
		for i in range(len(annotations)):
			for j in range(len(annotations)-1):
				module0 = annotations[j].propertyModule
				module1 = annotations[j+1].propertyModule
				if (module0 >= module1):
					# Swap Annotation
					aux = annotations[j+1]
					annotations[j+1] = annotations[j]
					annotations[j] = aux
		# Work on the points: each remaining core point spawns one ROI crop.
		for i in range(len(annotations)):
			# Ignore non-core points (already absorbed by a previous ROI).
			if (annotations[i].propertyCorePoint == False):
				pass
			else:
				# Center the core point in an allowed image space of size `offset`.
				RoiXMin, RoiYMin, \
				RoiXMax, RoiYMax = prep.adjustImage(frameHeight = height,
											frameWidth = width,
											boundingBoxes = [annotations[i].propertyBndbox],
											offset = offset)
				# Find the annotations that can be included in the allowed image space.
				for j in range(len(annotations)):
					# Get bounding box.
					ix, iy, x, y = annotations[j].propertyBndbox
					# Check current bounding box is inside the allowed space.
					if ((ix >= RoiXMin) and (x <= RoiXMax)) and \
						((iy >= RoiYMin) and (y <= RoiYMax)):
						# Disable point from being a core point. Check it is not the
						# current point of reference.
						if (not (annotations[i].propertyBndbox == annotations[j].propertyBndbox)):
							annotations[j].propertyCorePoint = False
				# Include the corresponding bounding boxes in the region of interest
				# (coordinates are shifted to the ROI's frame by the helper).
				newBoundingBoxes, \
				newNames = prep.includeBoundingBoxes(edges = [RoiXMin, RoiYMin, RoiXMax, RoiYMax],
														boundingBoxes = boundingBoxes,
														names = names)
				if (len(newBoundingBoxes) == 0):
					print(boundingBoxes)
					print(RoiXMin, RoiYMin, RoiXMax, RoiYMax)
					raise Exception("ERROR: No bounding boxes: {}. Please report this problem.".format(imagePath))
				# Read image.
				frame = cv2.imread(imagePath)
				extension = Util.detect_file_extension(filename = imagePath)
				if (extension == None):
					raise Exception("Your image extension is not valid. " +\
									"Only jpgs and pngs are allowed. {}".format(extension))
				# Generate a new name.
				newName = Util.create_random_name(name = self.databaseName, length = 4)
				imgName = newName + extension
				xmlName = newName + ".xml"
				# Save the ROI crop.
				Util.save_img(frame = frame[RoiYMin:RoiYMax, RoiXMin:RoiXMax, :],
								img_name = imgName,
								output_image_directory = outputImageDirectory)
				# Save the matching annotation.
				Util.save_annotation(filename = imgName,
									path = os.path.join(outputImageDirectory, imgName),
									database_name = self.databaseName,
									frame_size = frame[RoiYMin:RoiYMax, RoiXMin:RoiXMax, :].shape,
									data_augmentation_type = "Unspecified",
									bounding_boxes = newBoundingBoxes,
									names = newNames,
									origin = imagePath,
									output_directory = os.path.join(outputAnnotationDirectory, xmlName))
def applyDataAugmentation(self, configurationFile = None, outputImageDirectory = None, outputAnnotationDirectory = None, threshold = None):
"""
Applies one or multiple data augmentation methods to the dataset.
Args:
configurationFile: A string with a path to a json file that contains the
configuration of the data augmentation methods.
outputImageDirectory: A string that contains the path to the directory where
images will be saved.
outputAnnotationDirectory: A string that contains the path the directory where
annotations will be saved.
threshold: A float that contains a number between 0 and 1.
Returns:
None
"""
# Assertions
if (configurationFile == None):
raise ValueError("ERROR: Augmenter parameter cannot be empty.")
else:
if (not os.path.isfile(configurationFile)):
raise Exception("ERROR: Path to json file ({}) does not exist."\
.format(configurationFile))
jsonConf = AugmentationConfigurationFile(file = configurationFile)
typeAugmentation = jsonConf.runAllAssertions()
if (outputImageDirectory == None):
outputImageDirectory = os.getcwd()
Util.create_folder(os.path.join(outputImageDirectory, "images"))
outputImageDirectory = os.path.join(os.getcwd(), "images")
if (not (os.path.isdir(outputImageDirectory))):
raise Exception("ERROR: Path to output directory does not exist. {}"\
.format(outputImageDirectory))
if (outputAnnotationDirectory == None):
outputAnnotationDirectory = os.getcwd()
Util.create_folder(os.path.join(outputAnnotationDirectory, "annotations"))
Util.create_folder(os.path.join(outputAnnotationDirectory, "annotations", "xmls"))
outputAnnotationDirectory = os.path.join(os.getcwd(), "annotations", "xmls")
if (not (os.path.isdir(outputAnnotationDirectory))):
raise Exception("ERROR: Path to output annotation directory does not exist. {}"\
.format(outputAnnotationDirectory))
if (threshold == None):
threshold = 0.5
if (type(threshold) != float):
raise TyperError("ERROR: threshold parameter must be of type float.")
if ((threshold > 1) or (threshold < 0)):
raise ValueError("ERROR: threshold paramater should be a number between" +\
" 0-1.")
# Load configuration data.
f = open(configurationFile)
data = json.load(f)
f.close()
# Iterate over the images.
for img in tqdm(os.listdir(self.imagesDirectory)):
# Get the extension
extension = Util.detect_file_extension(filename = img)
if (extension == None):
raise Exception("ERROR: Your image extension is not valid." +\
"Only jpgs and pngs are allowed.")
# Extract name.
filename = os.path.split(img)[1].split(extension)[0]
# Create xml and img name.
imgFullPath = os.path.join(self.imagesDirectory, filename + extension)
xmlFullPath = os.path.join(self.annotationsDirectory, filename + ".xml")
imgAnt = ImageAnnotation(path = xmlFullPath)
boundingBoxes = imgAnt.propertyBoundingBoxes
names = imgAnt.propertyNames
# Apply augmentation.
if (typeAugmentation == 0):
for i in data["bounding_box_augmenters"]:
if (i == "Sequential"):
# Prepare data for sequence
frame = cv2.imread(imgFullPath)
bndboxes = boundingBoxes
# Read elements of vector
assert type(data["bounding_box_augmenters"][i]) == list, "Not list"
for k in range(len(data["bounding_box_augmenters"][i])):
# Extract information
augmentationType = list(data["bounding_box_augmenters"][i][k].keys())[0]
if (not jsonConf.isValidBoundingBoxAugmentation(augmentation = augmentationType)):
raise Exception("ERROR: {} is not valid.".format(augmentationType))
parameters = data["bounding_box_augmenters"][i][k][augmentationType]
# Save?
saveParameter = jsonConf.extractSavingParameter(parameters = parameters)
frame, bndboxes = applyBoundingBoxAugmentation(frame = frame,
boundingBoxes = bndboxes,
augmentationType = augmentationType, #j,
parameters = parameters)
if (saveParameter == True):
# Generate a new name.
newName = Util.create_random_name(name = self.databaseName, length = 4)
imgName = newName + extension
xmlName = newName + ".xml"
# Save image.
Util.save_img(frame = frame,
img_name = imgName,
output_image_directory = outputImageDirectory)
# Save annotation.
Util.save_annotation(filename = imgName,
path = os.path.join(outputImageDirectory, imgName),
database_name = self.databaseName,
frame_size = frame.shape,
data_augmentation_type = augmentationType,
bounding_boxes = bndboxes,
names = names,
origin = imgFullPath,
output_directory = os.path.join(outputAnnotationDirectory, xmlName))
else:
parameters = data["bounding_box_augmenters"][i]
# Save?
saveParameter = jsonConf.extractSavingParameter(parameters = parameters)
frame, bndboxes = applyBoundingBoxAugmentation(frame = cv2.imread(imgFullPath),
boundingBoxes = boundingBoxes,
augmentationType = i,
parameters = parameters)
# Save frame
if (saveParameter == True):
# Generate a new name.
newName = Util.create_random_name(name = self.databaseName, length = 4)
imgName = newName + extension
xmlName = newName + ".xml"
# Save image.
Util.save_img(frame = frame,
img_name = imgName,
output_image_directory = outputImageDirectory)
# Save annotation.
Util.save_annotation(filename = imgName,
path = os.path.join(outputImageDirectory, imgName),
database_name = self.databaseName,
frame_size = frame.shape,
data_augmentation_type = augmentationType,
bounding_boxes = bndboxes,
names = names,
origin = imgFullPath,
output_directory = os.path.join(outputAnnotationDirectory, xmlName))
elif (typeAugmentation == 1):
# Geometric data augmentations
raise ValueError("Image geometric data augmentations are not " +\
"supported for bounding boxes. Use bounding box " +\
"augmentation types.")
elif (typeAugmentation == 2):
# Color data augmentations
for i in data["image_color_augmenters"]:
if (i == "Sequential"):
# Prepare data for sequence
frame = cv2.imread(imgFullPath)
# Read elements of vector
assert type(data["image_color_augmenters"][i]) == list, "Not list"
for k in range(len(data["image_color_augmenters"][i])):
# Extract information
augmentationType = list(data["image_color_augmenters"][i][k].keys())[0]
if (not jsonConf.isValidColorAugmentation(augmentation = augmentationType)):
raise Exception("ERROR: {} is not valid.".format(augmentationType))
parameters = data["image_color_augmenters"][i][k][augmentationType]
# Save?
saveParameter = jsonConf.extractSavingParameter(parameters = parameters)
# Apply augmentation
frame = applyColorAugmentation(frame = frame,
augmentationType = augmentationType, #j,
parameters = parameters)
if (saveParameter == True):
# Generate a new name.
newName = Util.create_random_name(name = self.databaseName, length = 4)
imgName = newName + extension
xmlName = newName + ".xml"
# Save image.
Util.save_img(frame = frame,
img_name = imgName,
output_image_directory = outputImageDirectory)
# Save annotation.
Util.save_annotation(filename = imgName,
path = os.path.join(outputImageDirectory, imgName),
database_name = self.databaseName,
frame_size = frame.shape,
data_augmentation_type = augmentationType,
bounding_boxes = bndboxes,
names = names,
origin = imgFullPath,
output_directory = os.path.join(outputAnnotationDirectory, xmlName))
else:
parameters = data["image_color_augmenters"][i]
# Save?
saveParameter = jsonConf.extractSavingParameter(parameters = parameters)
frame = applyColorAugmentation(frame = cv2.imread(imgFullPath),
augmentationType = i,
parameters = parameters)
# Save frame
if (saveParameter == True):
# Generate a new name.
newName = Util.create_random_name(name = self.databaseName, length = 4)
imgName = newName + extension
xmlName = newName + ".xml"
# Save image.
Util.save_img(frame = frame,
img_name = imgName,
output_image_directory = outputImageDirectory)
# Save annotation.
Util.save_annotation(filename = imgName,
path = os.path.join(outputImageDirectory, imgName),
database_name = self.databaseName,
frame_size = frame.shape,
data_augmentation_type = augmentationType,
bounding_boxes = bndboxes,
names = names,
origin = imgFullPath,
output_directory = os.path.join(outputAnnotationDirectory, xmlName))
elif (typeAugmentation == 3):
# Assert sequential follows multiple_image_augmentations.
if (not ("Sequential" in data["multiple_image_augmentations"])):
raise Exception("ERROR: Data after multiple_image_augmentations is not recognized.")
# Multiple augmentation configurations, get a list of hash maps of all the confs.
list_of_augmenters_confs = data["multiple_image_augmentations"]["Sequential"]
# Assert list_of_augmenters_confs is a list.
if (not (type(list_of_augmenters_confs) == list)):
raise TypeError("ERROR: Data inside [multiple_image_augmentations][Sequential] must be a list.")
# Prepare data for sequence.
frame = cv2.imread(imgFullPath)
bndboxes = boundingBoxes
# print("\n*", list_of_augmenters_confs, "\n")
for k in range(len(list_of_augmenters_confs)):
# Get augmenter type ("bounding_box_augmenter" or "color_augmenter") position
# in the list of multiple augmentations.
augmentationConf = list(list_of_augmenters_confs[k].keys())[0]
if (not (jsonConf.isBndBxAugConfFile(keys = [augmentationConf]) or
jsonConf.isColorConfFile(keys = [augmentationConf]))):
raise Exception("{} is not a valid configuration.".format(augmentationConf))
# Get sequential information from there. This information is a list of
# the types of augmenters that belong to augmentationConf.
list_of_augmenters_confs_types = list_of_augmenters_confs[k][augmentationConf]["Sequential"]
# Assert list_of_augmenters_confs is a list
if (not (type(list_of_augmenters_confs_types) == list)):
raise TypeError("Data inside [multiple_image_augmentations][Sequential][{}][Sequential] must be a list."\
.format(augmentationConf))
# Iterate over augmenters inside sequential of type.
for l in range(len(list_of_augmenters_confs_types)):
# Get augmentation type and its parameters.
augmentationType = list(list_of_augmenters_confs_types[l].keys())[0]
# Assert augmentation is valid.
if (not (jsonConf.isValidBoundingBoxAugmentation(augmentation = augmentationType) or
jsonConf.isValidColorAugmentation(augmentation = augmentationType))):
raise Exception("ERROR: {} is not valid.".format(augmentationType))
parameters = list_of_augmenters_confs_types[l][augmentationType]
# Save?
saveParameter = jsonConf.extractSavingParameter(parameters = parameters)
# Restart frame to original?
restartFrameParameter = jsonConf.extractRestartFrameParameter(parameters = parameters)
# Probability of augmentation happening.
randomEvent = jsonConf.randomEvent(parameters = parameters, threshold = threshold)
# print(augmentationType, parameters)
# Apply augmentation.
if (augmentationConf == "image_color_augmenters"):
# print(augmentationConf, augmentationType, parameters)
if (randomEvent == True):
frame = applyColorAugmentation(frame = frame,
augmentationType = augmentationType,
parameters = parameters)
elif (augmentationConf == "bounding_box_augmenters"):
# print(augmentationConf, augmentationType, parameters)
if (randomEvent == True):
frame, bndboxes = applyBoundingBoxAugmentation(frame = frame,
boundingBoxes = bndboxes,
augmentationType = augmentationType, #j,
parameters = parameters)
# Save?
if ((saveParameter == True) and (randomEvent == True)):
# Generate a new name.
newName = Util.create_random_name(name = self.databaseName, length = 4)
imgName = newName + extension
xmlName = newName + ".xml"
# Save image.
Util.save_img(frame = frame,
img_name = imgName,
output_image_directory = outputImageDirectory)
# Save annotation.
Util.save_annotation(filename = imgName,
path = os.path.join(outputImageDirectory, imgName),
database_name = self.databaseName,
frame_size = frame.shape,
data_augmentation_type = augmentationType,
bounding_boxes = bndboxes,
names = names,
origin = imgFullPath,
output_directory = os.path.join(outputAnnotationDirectory, xmlName))
# Restart frame?
if (restartFrameParameter == True):
frame = cv2.imread(imgFullPath)
bndboxes = boundingBoxes
else:
raise Exception("Type augmentation {} not valid.".format(typeAugmentation))
class Annotation(object):
    """Holds the data of one image annotation: a label name, a bounding box,
    an optional module score, a core-point flag, and lists of associated
    sub-annotations."""

    def __init__(self, name = None, bndbox = None, module = None, corePoint = None):
        """
        A class that holds parameters of a common annotation.
        Args:
            name: A string that contains a name.
            bndbox: A list of ints.
            module: A float. Defaults to -1 ("no module score") when omitted.
            corePoint: A boolean.
        Raises:
            ValueError: If name, bndbox or corePoint is missing.
        Returns:
            None
        """
        super(Annotation, self).__init__()
        # Validate required parameters (use identity checks so falsy values
        # such as 0 or False are still accepted where meaningful).
        if name is None:
            raise ValueError("Name parameter cannot be empty.")
        if bndbox is None:
            raise ValueError("Bounding box parameter cannot be empty.")
        if module is None:
            # -1 marks "no module score available".
            module = -1
        if corePoint is None:
            raise ValueError("corePoint parameter cannot be empty.")
        # Class variables
        self.name = name
        self.bndbox = bndbox
        self.module = module
        self.corePoint = corePoint
        self.otherAnnotations = []
        self.otherAnnotationsName = []

    @property
    def propertyName(self):
        return self.name

    @property
    def propertyBndbox(self):
        return self.bndbox

    # Note: the original code declared the propertyModule getter twice; the
    # first (shadowed, dead) definition has been removed.
    @property
    def propertyModule(self):
        return self.module

    @propertyModule.setter
    def propertyModule(self, module):
        self.module = module

    @property
    def propertyCorePoint(self):
        return self.corePoint

    @propertyCorePoint.setter
    def propertyCorePoint(self, corePoint):
        self.corePoint = corePoint

    @property
    def propertyOtherAnnotation(self):
        return self.otherAnnotations

    def includeOtherAnnotation(self, annt):
        # Append a related annotation object.
        self.otherAnnotations.append(annt)

    @property
    def propertyOtherAnnotationName(self):
        return self.otherAnnotationsName

    def includeOtherAnnotationName(self, name):
        # Append the name of a related annotation.
        self.otherAnnotationsName.append(name)
|
TekkenEncyclopedia(ThrowBreakVersion).py
|
Sam-Si/TekkenBot
| 211 |
144291
|
"""
Collects information from TekkenGameState over time in hopes of synthesizing it and presenting it in a more useful way.
"""
import time
from enum import Enum
import artificial_keyboard
from MoveInfoEnums import AttackType
from MoveInfoEnums import ComplexMoveStates
from MoveInfoEnums import ThrowTechs
from TekkenGameState import TekkenGameState
class TekkenEncyclopedia:
    """Synthesizes TekkenGameState snapshots over time into frame data rows,
    punish-window coaching tips, and per-match win/loss statistics.

    One instance tracks one side of the match; ``isPlayerOne`` selects which
    side, and only the player-one instance owns the persistent stats file.
    """

    def __init__(self, isPlayerOne=False, print_extended_frame_data=False):
        # Frame data entries discovered so far, keyed by opponent move id.
        self.FrameData = {}
        # Closed GameStatEventEntry objects collected during the current round.
        self.GameEvents = []
        self.current_game_event = None
        self.isPlayerOne = isPlayerOne
        self.print_extended_frame_data = print_extended_frame_data
        # Frames waited so far for a move's active frames to elapse before
        # committing a frame data reading.
        self.active_frame_wait = 1
        self.was_fight_being_reacquired = True
        self.is_match_recorded = False
        self.stat_filename = "TekkenData/matches.txt"
        # Only the player-one instance loads/owns the win-loss record.
        if self.isPlayerOne:
            self.LoadStats()
        self.current_punish_window = None
        self.PunishWindows = []
        self.current_frame_data_entry = None
        self.previous_frame_data_entry = None

    def LoadStats(self):
        """Load win/loss/draw tallies from the matches file into stat_dict.

        A missing stats file is treated as an empty record.
        """
        self.stat_dict = {}
        self.stat_dict['char_stats'] = {}
        self.stat_dict['matchup_stats'] = {}
        self.stat_dict['opponent_stats'] = {}
        try:
            with open(self.stat_filename, 'r', encoding='utf-8') as fr:
                lines = fr.readlines()
                for line in lines:
                    if '|' in line:
                        args = line.split('|')
                        result = args[0].strip()
                        player_char = args[2].strip()
                        opponent_name = args[4].strip()
                        opponent_char = args[5].strip()
                        self.AddStat(result, player_char, opponent_name, opponent_char)
        except FileNotFoundError:
            pass

    def AddStat(self, result, player_char, opponent_name, opponent_char):
        """Fold one match result into the per-character, per-opponent and
        per-matchup [wins, losses, draws] tallies."""
        if opponent_char not in self.stat_dict['char_stats']:
            self.stat_dict['char_stats'][opponent_char] = [0, 0, 0]
        if opponent_name not in self.stat_dict['opponent_stats']:
            self.stat_dict['opponent_stats'][opponent_name] = [0, 0, 0]
        matchup_string = "{} vs {}".format(player_char, opponent_char)
        if matchup_string not in self.stat_dict['matchup_stats']:
            self.stat_dict['matchup_stats'][matchup_string] = [0, 0, 0]
        # Index 0 = wins, 1 = losses, 2 = draws.
        if 'WIN' in result:
            index = 0
        elif 'LOSS' in result:
            index = 1
        else:
            index = 2
        self.stat_dict['char_stats'][opponent_char][index] += 1
        self.stat_dict['opponent_stats'][opponent_name][index] += 1
        self.stat_dict['matchup_stats'][matchup_string][index] += 1

    def RecordFromStat(self, catagory, lookup):
        """Return a "wins - losses[ - draws]" string for one stat bucket.

        Note: parameter name 'catagory' (sic) is kept for interface
        compatibility with existing callers.
        """
        try:
            stats = self.stat_dict[catagory][lookup]
            wins = stats[0]
            losses = stats[1]
            draws = stats[2]
        except (KeyError, AttributeError):
            # KeyError: no record yet for this bucket.
            # AttributeError: stats were never loaded for this instance.
            # (The original bare 'except:' hid genuine bugs as well.)
            wins = 0
            losses = 0
            draws = 0
        if draws <= 0:
            return "{} - {}".format(wins, losses)
        else:
            return "{} - {} - {}".format(wins, losses, draws)

    def GetPlayerString(self, reverse=False):
        """Return 'p1: ' or 'p2: ' for this instance's side (flipped if reverse)."""
        if (self.isPlayerOne and not reverse) or (not self.isPlayerOne and reverse):
            return "p1: "
        else:
            return "p2: "

    def GetFrameAdvantage(self, moveId, isOnBlock=True):
        """Return the recorded on-block (or on-normal-hit) advantage for
        moveId, or None if the move has not been observed yet."""
        if moveId in self.FrameData:
            if isOnBlock:
                return self.FrameData[moveId].onBlock
            else:
                return self.FrameData[moveId].onNormalHit
        else:
            return None

    # Set the dummy to jump and hold up and this prints the frame difference.
    def CheckJumpFrameDataFallback(self, gameState):
        if not self.isPlayerOne:
            if gameState.IsFulfillJumpFallbackConditions():
                print("p1 jump frame diff: " + str(gameState.GetBotMoveTimer() - gameState.GetOppMoveTimer()))

    def Update(self, gameState: TekkenGameState):
        """Per-frame entry point: mirror the state if needed, then run every analyzer."""
        if self.isPlayerOne:
            gameState.FlipMirror()
        # self.CheckJumpFrameDataFallback(gameState)
        self.DetermineFrameData(gameState)
        self.DetermineGameStats(gameState)
        self.DetermineCoachingTips(gameState)
        if self.isPlayerOne:
            gameState.FlipMirror()

    def DetermineCoachingTips(self, gameState: TekkenGameState):
        """Open/close punish windows as frame data entries change, and grade
        the punish the player actually performed."""
        if self.previous_frame_data_entry != self.current_frame_data_entry:
            # A new frame data entry was just produced: open a fresh window.
            self.previous_frame_data_entry = self.current_frame_data_entry
            if self.current_punish_window is not None:
                self.ClosePunishWindow(PunishWindow.Result.NO_WINDOW, do_close_frame_data_entries=False)
            # if int(self.current_frame_data_entry.currentFrameAdvantage) <= 999999:
            self.current_punish_window = PunishWindow(self.current_frame_data_entry.prefix,
                                                      self.current_frame_data_entry.move_id,
                                                      self.current_frame_data_entry.input,
                                                      int(self.current_frame_data_entry.hitRecovery),
                                                      int(self.current_frame_data_entry.blockRecovery),
                                                      int(self.current_frame_data_entry.activeFrames))
            self.PunishWindows.append(self.current_punish_window)
            self.punish_window_counter = 0
        if self.current_punish_window is not None:
            self.punish_window_counter += 1
            # if self.punish_window_counter > self.current_punish_window.size:
            was_block_punish = gameState.DidOppStartGettingPunishedXFramesAgo(
                1) or gameState.DidOppStartGettingHitXFramesAgo(1)
            if was_block_punish:
                # leeway = frames the player left on the table before punishing.
                leeway = (gameState.OppFramesUntilRecoveryXFramesAgo(2) - 1)
                LAUNCH_PUNISHIBLE = 15
                BAD_PUNISH_THRESHOLD = 13
                # if leeway == 0:
                #     self.ClosePunishWindow(PunishWindow.Result.PERFECT_PUNISH)
                # else:
                fa = (-1 * self.current_punish_window.get_frame_advantage())
                startup = fa - leeway
                if fa >= LAUNCH_PUNISHIBLE and startup <= BAD_PUNISH_THRESHOLD:
                    self.ClosePunishWindow(PunishWindow.Result.NO_LAUNCH_ON_LAUNCHABLE)
                elif fa >= LAUNCH_PUNISHIBLE:
                    self.ClosePunishWindow(PunishWindow.Result.LAUNCH_ON_LAUNCHABLE)
                else:
                    self.ClosePunishWindow(PunishWindow.Result.JAB_ON_NOT_LAUNCHABLE)
            elif gameState.HasOppReturnedToNeutralFromMoveId(
                    self.current_punish_window.move_id) and self.punish_window_counter >= self.current_punish_window.hit_recovery:
                # Window expired without a punish attempt.
                if self.current_punish_window.get_frame_advantage() <= -10:
                    self.ClosePunishWindow(PunishWindow.Result.NO_PUNISH)
                else:
                    self.ClosePunishWindow(PunishWindow.Result.NO_WINDOW)
        if self.current_punish_window is not None:
            self.current_punish_window.adjust_window(gameState.GetOppFramesTillNextMove(),
                                                    gameState.GetBotFramesTillNextMove())
        # perfect_punish = False
        # if was_block_punish:
        #     perfect_punish = gameState.WasBotMoveOnLastFrameXFramesAgo(2)

    def ClosePunishWindow(self, result, do_close_frame_data_entries=True):
        """Finalize the active punish window with the given Result."""
        self.current_punish_window.close_window(result)
        self.current_punish_window = None
        if do_close_frame_data_entries:
            self.previous_frame_data_entry = None
            self.current_frame_data_entry = None

    def DetermineGameStats(self, gameState: TekkenGameState):
        """Open/close GameStatEventEntry objects as combos start and end, and
        record match results when a fight is reset."""
        frames_ago = 4
        if self.current_game_event is None:
            if gameState.DidOppComboCounterJustStartXFramesAgo(frames_ago):
                gameState.BackToTheFuture(frames_ago)
                combo_counter_damage = gameState.GetOppComboDamageXFramesAgo(1)
                was_unblockable = gameState.IsOppAttackUnblockable()
                was_antiair = gameState.IsOppAttackAntiair()
                was_block_punish = gameState.DidBotStartGettingPunishedXFramesAgo(1)
                perfect_punish = False  # currently unused
                if was_block_punish:
                    perfect_punish = gameState.BotFramesUntilRecoveryXFramesAgo(2) == 1
                was_counter_hit = gameState.IsBotGettingCounterHit()
                was_ground_hit = gameState.IsBotGettingHitOnGround()
                was_whiff_punish = gameState.GetBotStartupXFramesAgo(2) > 0
                was_low_hit = gameState.IsOppAttackLow()
                was_mid_hit_on_crouching = gameState.IsOppAttackMid() and gameState.IsBotCrouching()
                was_throw = gameState.IsBotBeingThrown()
                was_damaged_during_attack = gameState.DidOppTakeDamageDuringStartup()
                gameState.ReturnToPresent()
                # Classify the opening hit; the chain order encodes priority.
                if was_unblockable:
                    hit = GameStatEventEntry.EntryType.UNBLOCKABLE
                elif was_antiair:
                    hit = GameStatEventEntry.EntryType.ANTIAIR
                elif was_throw:
                    hit = GameStatEventEntry.EntryType.THROW
                elif was_damaged_during_attack:
                    hit = GameStatEventEntry.EntryType.POWER_CRUSHED
                elif was_block_punish:
                    hit = GameStatEventEntry.EntryType.PUNISH
                elif was_counter_hit:
                    hit = GameStatEventEntry.EntryType.COUNTER
                elif was_ground_hit:
                    hit = GameStatEventEntry.EntryType.GROUND
                elif was_whiff_punish:
                    hit = GameStatEventEntry.EntryType.WHIFF_PUNISH
                elif was_low_hit:
                    hit = GameStatEventEntry.EntryType.LOW
                elif was_mid_hit_on_crouching:
                    hit = GameStatEventEntry.EntryType.MID
                else:
                    hit = GameStatEventEntry.EntryType.NO_BLOCK
                self.current_game_event = GameStatEventEntry(gameState.stateLog[-1].timer_frames_remaining,
                                                             self.GetPlayerString(True), hit, combo_counter_damage)
                # print("event open")
            else:
                # No combo started, but damage was taken: an armored hit.
                bot_damage_taken = gameState.DidBotJustTakeDamage(frames_ago + 1)
                if bot_damage_taken > 0:
                    # print('armored')
                    game_event = GameStatEventEntry(gameState.stateLog[-1].timer_frames_remaining,
                                                    self.GetPlayerString(True), GameStatEventEntry.EntryType.ARMORED,
                                                    0)  # this is probably gonna break for Yoshimitsu's self damage moves
                    game_event.close_entry(gameState.stateLog[-1].timer_frames_remaining, 1, bot_damage_taken, 0,
                                           len(self.GameEvents))
                    self.GameEvents.append(game_event)
        else:
            if gameState.DidOppComboCounterJustEndXFramesAgo(frames_ago) or gameState.WasFightReset():
                hits = gameState.GetOppComboHitsXFramesAgo(frames_ago + 1)
                damage = gameState.GetOppComboDamageXFramesAgo(frames_ago + 1)
                juggle = gameState.GetOppJuggleDamageXFramesAgo(frames_ago + 1)
                self.current_game_event.close_entry(gameState.stateLog[-1].timer_frames_remaining, hits, damage, juggle,
                                                    len(self.GameEvents))
                self.GameEvents.append(self.current_game_event)
                self.current_game_event = None
                # print("event closed")
        if gameState.WasFightReset():
            # print("p1: NOW:0")
            # print("p2: NOW:0")
            if self.isPlayerOne:
                # NOTE: '== False' is kept deliberately in case the flag can be
                # non-boolean; 'not flag' would not be equivalent for None.
                if gameState.gameReader.flagToReacquireNames == False and self.was_fight_being_reacquired:
                    self.is_match_recorded = False
                    for entry in self.get_matchup_record(gameState):
                        print(entry)
                round_number = gameState.GetRoundNumber()
                print("!ROUND | {} | HIT".format(round_number))
                if (gameState.stateLog[-1].bot.wins == 3 or gameState.stateLog[
                        -1].opp.wins == 3) and not self.is_match_recorded:
                    self.is_match_recorded = True
                    player_name = "You"
                    p1_char_name = gameState.stateLog[-1].opp.character_name
                    p1_wins = gameState.stateLog[-1].opp.wins
                    opponent_name = gameState.stateLog[-1].opponent_name
                    p2_char_name = gameState.stateLog[-1].bot.character_name
                    p2_wins = gameState.stateLog[-1].bot.wins
                    if gameState.stateLog[-1].is_player_player_one:
                        player_char, player_wins = p1_char_name, p1_wins
                        opponent_char, opponent_wins = p2_char_name, p2_wins
                    else:
                        player_char, player_wins = p2_char_name, p2_wins
                        opponent_char, opponent_wins = p1_char_name, p1_wins
                    if player_wins == opponent_wins:
                        result = 'DRAW'
                    elif player_wins > opponent_wins:
                        result = 'WIN'
                    else:
                        result = "LOSS"
                    match_result = '{} | {} | {} | vs | {} | {} | {}-{} | {}'.format(result, player_name, player_char,
                                                                                    opponent_name, opponent_char,
                                                                                    player_wins, opponent_wins,
                                                                                    time.strftime('%Y_%m_%d_%H.%M'))
                    print("{}".format(match_result))
                    self.AddStat(result, player_char, opponent_name, opponent_char)
                    with open(self.stat_filename, "a", encoding='utf-8') as fa:
                        fa.write(match_result + '\n')
            # NOTE(review): the 'or True' forces this branch on; the timer/
            # event-count condition is currently dead.
            if (gameState.GetTimer(frames_ago) < 3600 and len(self.GameEvents) > 0) or True:
                summary = RoundSummary(self.GameEvents, gameState.GetOppRoundSummary(frames_ago))
            self.GameEvents = []
        self.was_fight_being_reacquired = gameState.gameReader.flagToReacquireNames

    def get_matchup_record(self, gameState):
        """Return printable '!RECORD' lines for the current matchup."""
        if gameState.stateLog[-1].is_player_player_one:
            opponent_char = gameState.stateLog[-1].bot.character_name
            player_char = gameState.stateLog[-1].opp.character_name
        else:
            opponent_char = gameState.stateLog[-1].opp.character_name
            player_char = gameState.stateLog[-1].bot.character_name
        opponent_name = gameState.stateLog[-1].opponent_name
        return [
            ("!RECORD | vs {}: {}".format(opponent_char, self.RecordFromStat('char_stats', opponent_char))),
            ("!RECORD | vs {}: {}".format(opponent_name, self.RecordFromStat('opponent_stats', opponent_name))),
            ("!RECORD | {} vs {}: {}".format(player_char, opponent_char, self.RecordFromStat("matchup_stats",
                                                                                             "{} vs {}".format(
                                                                                                 player_char,
                                                                                                 opponent_char))))
        ]

    def DetermineFrameData(self, gameState):
        """Capture a frame data entry when the bot is blocking or getting hit,
        waiting out a move's active frames before committing the reading."""
        if (
                gameState.IsBotBlocking() or gameState.IsBotGettingHit() or gameState.IsBotBeingThrown() or gameState.IsBotBeingKnockedDown() or gameState.IsBotBeingWallSplatted()):  # or gameState.IsBotUsingOppMovelist()): #or gameState.IsBotStartedBeingJuggled() or gameState.IsBotJustGrounded()):
            # print(gameState.stateLog[-1].bot.move_id)
            # print(gameState.stateLog[-1].bot.move_timer)
            # print(gameState.stateLog[-1].bot.recovery)
            # print(gameState.DidBotIdChangeXMovesAgo(self.active_frame_wait))
            if gameState.DidBotIdChangeXMovesAgo(self.active_frame_wait) or gameState.DidBotTimerInterruptXMovesAgo(
                    self.active_frame_wait):  # or gameState.DidOppIdChangeXMovesAgo(self.active_frame_wait):
                is_recovering_before_long_active_frame_move_completes = (
                        gameState.GetBotRecovery() - gameState.GetBotMoveTimer() == 0)
                gameState.BackToTheFuture(self.active_frame_wait)
                # print(gameState.GetOppActiveFrames())
                if (
                        not self.active_frame_wait >= gameState.GetOppActiveFrames() + 1) and not is_recovering_before_long_active_frame_move_completes:
                    # Still inside the move's active frames: wait one more frame.
                    self.active_frame_wait += 1
                else:
                    gameState.ReturnToPresent()
                    currentActiveFrame = gameState.GetLastActiveFrameHitWasOn(self.active_frame_wait)
                    gameState.BackToTheFuture(self.active_frame_wait)
                    opp_id = gameState.GetOppMoveId()
                    if opp_id in self.FrameData:
                        frameDataEntry = self.FrameData[opp_id]
                    else:
                        frameDataEntry = FrameDataEntry(self.print_extended_frame_data)
                        self.FrameData[opp_id] = frameDataEntry
                    frameDataEntry.currentActiveFrame = currentActiveFrame
                    frameDataEntry.currentFrameAdvantage = '??'
                    frameDataEntry.move_id = opp_id
                    # frameDataEntry.damage =
                    frameDataEntry.damage = gameState.GetOppDamage()
                    frameDataEntry.startup = gameState.GetOppStartup()
                    if frameDataEntry.damage == 0 and frameDataEntry.startup == 0:
                        frameDataEntry.startup, frameDataEntry.damage = gameState.GetOppLatestNonZeroStartupAndDamage()
                    frameDataEntry.activeFrames = gameState.GetOppActiveFrames()
                    frameDataEntry.hitType = AttackType(gameState.GetOppAttackType()).name
                    if gameState.IsOppAttackThrow():
                        frameDataEntry.hitType += "_THROW"
                    frameDataEntry.recovery = gameState.GetOppRecovery()
                    # frameDataEntry.input = frameDataEntry.InputTupleToInputString(gameState.GetOppLastMoveInput())
                    frameDataEntry.input = gameState.GetCurrentOppMoveString()
                    frameDataEntry.technical_state_reports = gameState.GetOppTechnicalStates(frameDataEntry.startup - 1)
                    frameDataEntry.tracking = gameState.GetOppTrackingType(frameDataEntry.startup)
                    # print(gameState.GetRangeOfMove())
                    gameState.ReturnToPresent()
                    # frameDataEntry.throwTech = gameState.GetBotThrowTech(frameDataEntry.activeFrames + frameDataEntry.startup)
                    frameDataEntry.throwTech = gameState.GetBotThrowTech(1)
                    time_till_recovery_opp = gameState.GetOppFramesTillNextMove()
                    time_till_recovery_bot = gameState.GetBotFramesTillNextMove()
                    new_frame_advantage_calc = time_till_recovery_bot - time_till_recovery_opp
                    frameDataEntry.currentFrameAdvantage = frameDataEntry.WithPlusIfNeeded(new_frame_advantage_calc)
                    if gameState.IsBotBlocking():
                        frameDataEntry.onBlock = new_frame_advantage_calc
                    else:
                        if gameState.IsBotGettingCounterHit():
                            frameDataEntry.onCounterHit = new_frame_advantage_calc
                        else:
                            frameDataEntry.onNormalHit = new_frame_advantage_calc
                    frameDataEntry.hitRecovery = time_till_recovery_opp
                    frameDataEntry.blockRecovery = time_till_recovery_bot
                    frameDataEntry.move_str = gameState.GetCurrentOppMoveName()
                    frameDataEntry.prefix = self.GetPlayerString()
                    print(str(frameDataEntry))
                    self.current_frame_data_entry = frameDataEntry
                    gameState.BackToTheFuture(self.active_frame_wait)
                    self.active_frame_wait = 1
                gameState.ReturnToPresent()
class FrameDataEntry:
    """One row of frame data for a single opponent move id.

    Fields start as '??' placeholders and are filled in by
    TekkenEncyclopedia.DetermineFrameData as readings become available.
    In this "throw break" build, __repr__ additionally presses the matching
    throw-escape key when a throw tech is detected.
    """

    def __init__(self, print_extended=False):
        self.print_extended = print_extended
        self.prefix = '??'
        self.move_id = '??'
        self.move_str = '??'
        self.startup = '??'
        self.calculated_startup = -1
        self.hitType = '??'
        self.onBlock = '??'
        self.onCounterHit = '??'
        self.onNormalHit = '??'
        self.recovery = '??'
        self.damage = '??'
        self.blockFrames = '??'
        self.activeFrames = '??'
        self.currentFrameAdvantage = '??'
        self.currentActiveFrame = '??'
        self.input = '??'
        self.technical_state_reports = []
        self.blockRecovery = '??'
        self.hitRecovery = '??'
        self.throwTech = None
        self.tracking = ComplexMoveStates.F_MINUS

    def WithPlusIfNeeded(self, value):
        """Format a frame advantage with an explicit '+' for values >= 0.

        Non-numeric placeholders such as '??' are returned unchanged
        (comparing them to 0 raises TypeError, which we catch narrowly
        instead of the original bare 'except:').
        """
        try:
            if value >= 0:
                return '+' + str(value)
            else:
                return str(value)
        except TypeError:
            return str(value)

    def InputTupleToInputString(self, inputTuple):
        """Collapse a sequence of (direction, buttons, rage) input tuples
        into a single notation string."""
        s = ""
        for item in inputTuple:  # renamed from 'input' to avoid shadowing the builtin
            s += (item[0].name + item[1].name.replace('x', '+')).replace('N', '')
            if item[2]:
                s += "+R"
        return s

    def __repr__(self):
        notes = ''
        MAGIC_THROW_BREAK_NUMBER = 1
        MAGIC_THROW_BREAK_FLOAT = 1
        # Side effect (the "ThrowBreakVersion" feature): when a throw tech is
        # detected, press the matching break button via the artificial keyboard.
        if self.throwTech is not None and self.throwTech != ThrowTechs.NONE:
            throw_type = self.throwTech.name
            if throw_type == "TE1":
                # press 1
                artificial_keyboard.press_and_release_n_times(0x16, MAGIC_THROW_BREAK_FLOAT, MAGIC_THROW_BREAK_NUMBER)
            elif throw_type == "TE2":
                # press 2
                artificial_keyboard.press_and_release_n_times(0x17, MAGIC_THROW_BREAK_FLOAT, MAGIC_THROW_BREAK_NUMBER)
            elif throw_type == "TE1_2":
                # press 1+2
                artificial_keyboard.press_and_release_n_times(0x18, MAGIC_THROW_BREAK_FLOAT, MAGIC_THROW_BREAK_NUMBER)
            notes += self.throwTech.name + " "
        self.calculated_startup = self.startup
        for report in self.technical_state_reports:
            # if not self.print_extended:
            if 'TC' in report.name and report.is_present():
                notes += str(report)
            elif 'TJ' in report.name and report.is_present():
                notes += str(report)
            elif 'PC' in report.name and report.is_present():
                notes += str(report)
            elif 'SKIP' in report.name and report.is_present():
                # Frames skipped reduce the effective (calculated) startup.
                self.calculated_startup -= report.total_present()
            elif 'FROZ' in report.name and report.is_present():
                self.calculated_startup -= report.total_present()
            elif self.print_extended:
                if report.is_present():
                    notes += str(report)
        nerd_string = ""
        if self.print_extended:
            pass
            # notes += ' stun {}'.format(self.blockRecovery)
            # notes += ' a_recovery {}'.format(self.hitRecovery)
            # notes += "Total:" + str(self.recovery) + "f "
        if self.calculated_startup != self.startup:
            # Mark adjusted startups as uncertain.
            self.calculated_startup = str(self.calculated_startup) + "?"
        non_nerd_string = "{:^5}|{:^4}|{:^4}|{:^7}|{:^4}|{:^4}|{:^4}|{:^5}|{:^3}|{:^2}|{:^3}|{:^3}|{:^3}|".format(
            str(self.input),
            str(self.move_id),
            self.move_str,
            str(self.hitType)[:7],
            str(self.calculated_startup),
            self.WithPlusIfNeeded(self.onBlock),
            self.WithPlusIfNeeded(self.onNormalHit),
            self.WithPlusIfNeeded(self.onCounterHit),
            (str(self.currentActiveFrame) + "/" + str(self.activeFrames)),
            self.tracking.name.replace('_MINUS', '-').replace("_PLUS", '+').replace(ComplexMoveStates.UNKN.name, '?'),
            self.recovery,
            self.hitRecovery,
            self.blockRecovery
        )
        notes_string = "{}".format(notes)
        now_string = " NOW:{}".format(str(self.currentFrameAdvantage))
        return self.prefix + non_nerd_string + notes_string + now_string
class GameStatEventEntry:
    """A single significant hit taken by one player (combo opener, armored
    hit, ...), recorded with timing and damage totals for round statistics."""

    class EntryType(Enum):
        # How the opening hit landed. Numeric values and declaration order are
        # load-bearing: RoundSummary iterates this enum in definition order.
        COUNTER = 1
        PUNISH = 2
        WHIFF_PUNISH = 3
        LOW = 4
        MID = 5
        THROW = 6
        GROUND = 7
        NO_BLOCK = 8
        ARMORED = 10
        UNBLOCKABLE = 12
        ANTIAIR = 14
        POWER_CRUSHED = 15
        # Not implemented
        LOW_PARRY = 9
        OUT_OF_THE_AIR = 13

    class PunishType(Enum):
        NONE = 0
        PERFECT = 1
        JAB = 2
        JAB_ON_LAUNCH_PUNISHIBLE = 3

    def __init__(self, time_in_frames, player_string, hit_type: EntryType, combo_counter_damage):
        # Snapshot the combo counter at open time so close_entry() can report
        # only the damage this event added on top of it.
        self.start_time = time_in_frames
        self.player_string = player_string
        self.hit_type = hit_type
        self.damage_already_on_combo_counter = combo_counter_damage

    def close_entry(self, time_in_frames, total_hits, total_damage, juggle_damage, times_hit):
        """Finalize totals for this event and print one summary line."""
        self.end_time = time_in_frames
        self.total_hits = total_hits
        net_damage = total_damage - self.damage_already_on_combo_counter
        self.total_damage = net_damage if net_damage > 0 else 0
        self.juggle_damage = juggle_damage
        summary = '{} {} | {} | {} | {} | {} | HIT'.format(
            self.player_string, self.hit_type.name, self.total_damage,
            self.total_hits, self.start_time, self.end_time)
        print(summary)
class RoundSummary:
    """Aggregates one round's GameStatEventEntry list into per-hit-type
    damage totals and a juggle-vs-poke breakdown."""

    def __init__(self, events, round_variables):
        """
        Args:
            events: list of closed GameStatEventEntry objects for the round.
            round_variables: opaque round summary from the game state;
                index 1 appears to be total damage dealt — TODO confirm.
        """
        self.events = events
        self.collated_events = self.collate_events(events)
        total_damage = 0
        sources, types = self.collated_events
        # print('{} combos for {} damage'.format(types[0][0], types[0][1]))
        # print('{} pokes for {} damage'.format(types[1][0], types[1][1]))
        for event, hits, damage in sources:
            if damage > 0:
                # print('{} {} for {} damage'.format(hits, event.name, damage))
                total_damage += damage
        # Bug fix: the original computed total_damage and discarded it; keep
        # it (and round_variables) for __repr__ and later inspection.
        self.total_damage = total_damage
        self.round_variables = round_variables
        # print('total damage dealt {} ({})'.format(round_variables[1], total_damage))

    def collate_events(self, events):
        """Return (sources, types).

        sources: one (EntryType, occurrences, damage) tuple per entry type,
            sorted by damage descending.
        types: [(hits into juggles, juggle damage), (hits into pokes, poke damage)].
        """
        hits_into_juggles = 0
        hits_into_pokes = 0
        damage_from_juggles = 0
        damage_from_pokes = 0
        sources = []
        for entry in GameStatEventEntry.EntryType:
            occurances = 0
            damage = 0
            for event in events:
                if entry == event.hit_type:
                    occurances += 1
                    damage += event.total_damage
                    # Any juggle damage classifies the event as a juggle opener.
                    if event.juggle_damage > 0:
                        damage_from_juggles += event.total_damage
                        hits_into_juggles += 1
                    else:
                        damage_from_pokes += event.total_damage
                        hits_into_pokes += 1
            sources.append((entry, occurances, damage))
        sources.sort(key=lambda x: x[2], reverse=True)
        types = [(hits_into_juggles, damage_from_juggles), (hits_into_pokes, damage_from_pokes)]
        return sources, types

    def __repr__(self):
        # Bug fix: the original body was a bare 'pass', so __repr__ returned
        # None and repr(summary) raised TypeError. Return a real string.
        return 'RoundSummary({} events, {} damage)'.format(len(self.events), self.total_damage)
class PunishWindow:
    """Tracks one opportunity for the player to punish a blocked/whiffed move,
    updating the effective frame advantage as the game advances and recording
    how the window was eventually resolved."""

    class Result(Enum):
        NO_WINDOW = 0
        NO_PUNISH = 1
        PERFECT_PUNISH = 2
        NO_LAUNCH_ON_LAUNCHABLE = 3
        LAUNCH_ON_LAUNCHABLE = 4
        JAB_ON_NOT_LAUNCHABLE = 5
        NOT_YET_CLOSED = 99

    def __init__(self, prefix, move_id, string_name, hit_recovery, block_recovery, active_frames):
        self.prefix = prefix
        self.move_id = move_id
        self.name = string_name
        self.hit_recovery = hit_recovery
        self.block_recovery = block_recovery
        self.active_frames = active_frames
        # Lock bookkeeping: once the defender's recovery hits zero, the
        # advantage stops tracking block_recovery and decays per frame.
        self.is_window_locked = False
        self.upcoming_lock = False
        self.frames_locked = 0
        self.result = PunishWindow.Result.NOT_YET_CLOSED
        self.original_diff = self.get_frame_advantage()

    def get_frame_advantage(self):
        """Current advantage: normally block - hit recovery; once locked it is
        -hit_recovery minus the frames spent locked."""
        if self.is_window_locked:
            return -self.hit_recovery - self.frames_locked
        return self.block_recovery - self.hit_recovery

    def adjust_window(self, hit_recovery, block_recovery):
        """Feed this frame's recovery readings in; print when the advantage moves."""
        self.hit_recovery = hit_recovery
        if self.upcoming_lock:
            self.frames_locked += 1
            self.is_window_locked = True
        if not self.is_window_locked:
            self.block_recovery = block_recovery
        if block_recovery == 0:
            self.upcoming_lock = True
        advantage = self.get_frame_advantage()
        if advantage != self.original_diff:
            print('{} NOW:{}'.format(self.prefix, FrameDataEntry.WithPlusIfNeeded(None, advantage)))
            self.original_diff = advantage

    def close_window(self, result: Result):
        """Record the final verdict; NO_WINDOW closes silently."""
        self.result = result
        if result is not PunishWindow.Result.NO_WINDOW:
            print("Closing punish window, result: {}".format(self.result.name))
|
third_party/blink/tools/blinkpy/tool/blink_tool_unittest.py
|
zipated/src
| 2,151 |
144292
|
<filename>third_party/blink/tools/blinkpy/tool/blink_tool_unittest.py
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.common.system.output_capture import OutputCapture
from blinkpy.tool.blink_tool import BlinkTool
class BlinkToolTest(unittest.TestCase):
    """Tests for BlinkTool: global/command argument splitting, command lookup
    by name, and the two ways of requesting help output."""

    def test_split_args_basic(self):
        split = BlinkTool._split_command_name_from_args(
            ['--global-option', 'command', '--option', 'arg'])
        self.assertEqual(split, ('command', ['--global-option', '--option', 'arg']))

    def test_split_args_empty(self):
        # With no arguments there is no command name to extract.
        self.assertEqual(BlinkTool._split_command_name_from_args([]), (None, []))

    def test_split_args_with_no_options(self):
        split = BlinkTool._split_command_name_from_args(['command', 'arg'])
        self.assertEqual(split, ('command', ['arg']))

    def test_command_by_name(self):
        blink_tool = BlinkTool('path')
        self.assertEqual(blink_tool.command_by_name('help').name, 'help')
        self.assertIsNone(blink_tool.command_by_name('non-existent'))

    def test_help_command(self):
        capture = OutputCapture()
        capture.capture_output()
        blink_tool = BlinkTool('path')
        blink_tool.main(['tool', 'help'])
        stdout, stderr, logs = capture.restore_output()
        self.assertTrue(stdout.startswith('Usage: '))
        self.assertEqual('', stderr)
        self.assertEqual('', logs)

    def test_help_argument(self):
        capture = OutputCapture()
        capture.capture_output()
        blink_tool = BlinkTool('path')
        try:
            blink_tool.main(['tool', '--help'])
        except SystemExit:
            pass  # optparse calls sys.exit after showing help.
        finally:
            stdout, stderr, logs = capture.restore_output()
        self.assertTrue(stdout.startswith('Usage: '))
        self.assertEqual('', stderr)
        self.assertEqual('', logs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.