max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
biostar/recipes/test/test_ajax.py | Oribyne/biostar-central-fork | 477 | 140174 |
<filename>biostar/recipes/test/test_ajax.py
import logging
import os
import json
from unittest.mock import patch, MagicMock
from django.conf import settings
from django.test import TestCase, override_settings
from django.urls import reverse
from biostar.recipes import models, auth, ajax
from biostar.utils.helpers import fake_request, get_uuid
TEST_ROOT = os.path.abspath(os.path.join(settings.BASE_DIR, 'export', 'tested'))
TOC_ROOT = os.path.join(TEST_ROOT, 'toc')
# Ensure that the table of contents (TOC) directory exists.
os.makedirs(TOC_ROOT, exist_ok=True)
__MODULE_DIR = os.path.dirname(auth.__file__)
TEST_DIR = os.path.join(__MODULE_DIR, 'test')
IMPORT_ROOT_DIR = os.path.join(TEST_DIR, 'data')
logger = logging.getLogger('engine')
@override_settings(MEDIA_ROOT=TEST_ROOT, TOC_ROOT=TOC_ROOT, IMPORT_ROOT_DIR=IMPORT_ROOT_DIR)
class AjaxTest(TestCase):
def setUp(self):
logger.setLevel(logging.WARNING)
# Set up generic owner
self.owner = models.User.objects.create_user(username=f"tested{get_uuid(10)}", email="<EMAIL>")
self.owner.set_password("<PASSWORD>")
# Set up trusted (superuser) owner
self.trusted_owner = models.User.objects.create_user(username=f"tested{get_uuid(10)}", email="<EMAIL>",
is_superuser=True)
self.trusted_owner.set_password("<PASSWORD>")
self.project = auth.create_project(user=self.owner, name="tested", text="Text", summary="summary",
uid="tested")
self.project2 = auth.create_project(user=self.trusted_owner, name="tested", text="Text", summary="summary",
uid="tested")
self.recipe = auth.create_analysis(project=self.project, json_text="", template="",
security=models.Analysis.AUTHORIZED)
self.snippet_type = models.SnippetType.objects.create(name='Snippet type', owner=self.owner)
self.snippet = models.Snippet.objects.create(command='ls -l', type=self.snippet_type,
help_text='List files in directory',
owner=self.owner)
self.job = auth.create_job(analysis=self.recipe, user=self.owner)
self.job.save()
def test_check_job(self):
"""
Test AJAX function to check and update job status
"""
data = {'state': models.Job.RUNNING}
url = reverse('ajax_check_job', kwargs=dict(uid=self.job.uid))
request = fake_request(url=url, data=data, user=self.owner, method='GET')
json_response = ajax.check_job(request=request, uid=self.job.uid)
self.process_response(json_response)
def test_copy_file(self):
"""
Test AJAX function used to copy file
"""
data = {'path': os.path.join(IMPORT_ROOT_DIR, "plain-text.txt")}
url = reverse('copy_file')
request = fake_request(url=url, data=data, user=self.trusted_owner)
json_response = ajax.copy_file(request=request)
self.process_response(json_response)
def test_toggle_delete(self):
"""
Test AJAX function used to toggle delete on objects
"""
data = {'uid': self.job.uid, "type": 'job'}
url = reverse('toggle_delete')
request = fake_request(url=url, data=data, user=self.owner)
json_response = ajax.toggle_delete(request=request)
self.process_response(json_response)
def test_manage_access(self):
"""
Test AJAX function used to manage user access
"""
user2 = models.User.objects.create_user(username=f"tested{get_uuid(10)}", email="<EMAIL>")
data = {'user_id': user2.id, "project_uid": self.project.uid, "access": 'write'}
url = reverse('toggle_delete')
request = fake_request(url=url, data=data, user=self.owner)
json_response = ajax.manage_access(request)
self.process_response(json_response)
def test_copy_object(self):
"""
Test AJAX function used to copy objects
"""
user2 = models.User.objects.create_user(username=f"tested{get_uuid(10)}", email="<EMAIL>")
data = {'user_id': user2.id, "project_uid": self.project.uid, "access": 'write'}
url = reverse('toggle_delete')
request = fake_request(url=url, data=data, user=self.owner)
json_response = ajax.manage_access(request)
self.process_response(json_response)
def test_preview_json(self):
"""
Test AJAX function used to preview recipe json
"""
data = {'recipe':self.recipe.id, 'toml': "[foo]\nparam=2"}
url = reverse('preview_json')
request = fake_request(url=url, data=data, user=self.owner)
json_response = ajax.preview_json(request=request)
self.process_response(json_response)
def process_response(self, response):
"Check the response on POST request is redirected"
response_data = response.content
response_data = json.loads(response_data)
self.assertEqual(response_data['status'], 'success', f'Error :{response_data["msg"]}')
modin/experimental/sql/test/test_sql.py | Rubtsowa/modin | 7,258 | 140204 |
<reponame>Rubtsowa/modin
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import modin.pandas as pd
import io
titanic_snippet = """passenger_id,survived,p_class,name,sex,age,sib_sp,parch,ticket,fare,cabin,embarked
1,0,3,"Braund, Mr. <NAME>",male,22,1,0,A/5 21171,7.25,,S
2,1,1,"Cumings, Mrs. <NAME> (<NAME>)",female,38,1,0,PC 17599,71.2833,C85,C
3,1,3,"Heikkinen, <NAME>",female,26,0,0,STON/O2. 3101282,7.925,,S
4,1,1,"Futrelle, Mrs. <NAME> (<NAME>)",female,35,1,0,113803,53.1,C123,S
5,0,3,"Allen, Mr. <NAME>",male,35,0,0,373450,8.05,,S
6,0,3,"Moran, Mr. James",male,,0,0,330877,8.4583,,Q
7,0,1,"McCarthy, Mr. <NAME>",male,54,0,0,17463,51.8625,E46,S
8,0,3,"Palsson, Master. <NAME>",male,2,3,1,349909,21.075,,S
9,1,3,"Johnson, Mrs. <NAME> (<NAME>)",female,27,0,2,347742,11.1333,,S
"""
def test_sql_query():
from modin.experimental.sql import query
df = pd.read_csv(io.StringIO(titanic_snippet))
sql = "SELECT survived, p_class, count(passenger_id) as count FROM (SELECT * FROM titanic WHERE survived = 1) as t1 GROUP BY survived, p_class"
query_result = query(sql, titanic=df)
expected_df = (
df[df.survived == 1]
.groupby(["survived", "p_class"])
.agg({"passenger_id": "count"})
.reset_index()
)
assert query_result.shape == expected_df.shape
values_left = expected_df.dropna().values
values_right = query_result.dropna().values
assert (values_left == values_right).all()
def test_sql_extension():
import modin.experimental.sql # noqa: F401
df = pd.read_csv(io.StringIO(titanic_snippet))
expected_df = df[df["survived"] == 1][["passenger_id", "survived"]]
sql = "SELECT passenger_id, survived WHERE survived = 1"
query_result = df.sql(sql)
assert list(query_result.columns) == ["passenger_id", "survived"]
values_left = expected_df.values
values_right = query_result.values
assert values_left.shape == values_right.shape
assert (values_left == values_right).all()
awesome_gans/segan/segan_train.py | StevenJokess/Awesome-GANs | 739 | 140232 |
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.segan.segan_model as segan
from awesome_gans.datasets import MNISTDataSet
results = {'output': './gen_img/', 'checkpoint': './model/checkpoint', 'model': './model/SEGAN-model.ckpt'}
train_step = {
'global_step': 150001,
'logging_interval': 1500,
}
def main():
start_time = time.time() # Clocking start
# MNIST dataset load
mnist = MNISTDataSet().data
# GPU configure
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# SEGAN Model
model = segan.SEGAN(s)
# Initializing
s.run(tf.global_variables_initializer())
sample_x, _ = mnist.test.next_batch(model.sample_num)
sample_y = np.zeros(shape=[model.sample_num, model.n_classes])
for i in range(10):
sample_y[10 * i : 10 * (i + 1), i] = 1
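# Note: sample_y holds one-hot labels, but the label feed (model.y) is commented
# out in the feed_dicts below, so these labels are currently unused.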
for step in range(train_step['global_step']):
batch_x, batch_y = mnist.train.next_batch(model.batch_size)
batch_x = np.reshape(batch_x, model.image_shape)
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Update D network
_, d_loss = s.run(
[model.d_op, model.d_loss],
feed_dict={
model.x_1: batch_x,
model.x_2: batch_x,
# model.y: batch_y,
model.z: batch_z,
},
)
# Update G network
_, g_loss = s.run(
[model.g_op, model.g_loss],
feed_dict={
model.x_1: batch_x,
model.x_2: batch_x,
# model.y: batch_y,
model.z: batch_z,
},
)
if step % train_step['logging_interval'] == 0:
batch_x, batch_y = mnist.train.next_batch(model.batch_size)
batch_x = np.reshape(batch_x, model.image_shape)
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
d_loss, g_loss, summary = s.run(
[model.d_loss, model.g_loss, model.merged],
feed_dict={
model.x_1: batch_x,
model.x_2: batch_x,
# model.y: batch_y,
model.z: batch_z,
},
)
# Print loss
print("[+] Step %08d => " % step, " D loss : {:.8f}".format(d_loss), " G loss : {:.8f}".format(g_loss))
sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
# Training G model with sample image and noise
samples_1 = s.run(
model.g_sample_1,
feed_dict={
# model.y: sample_y,
model.z: sample_z,
},
)
samples_2 = s.run(
model.g_sample_2,
feed_dict={
# model.y: sample_y,
model.z: sample_z,
},
)
samples_1 = np.reshape(samples_1, [-1] + model.image_shape[1:])
samples_2 = np.reshape(samples_2, [-1] + model.image_shape[1:])
# Summary saver
model.writer.add_summary(summary, global_step=step)
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir_1 = results['output'] + 'train_1_{:08d}.png'.format(step)
sample_dir_2 = results['output'] + 'train_2_{:08d}.png'.format(step)
# Generated image save
iu.save_images(samples_1, size=[sample_image_height, sample_image_width], image_path=sample_dir_1)
iu.save_images(samples_2, size=[sample_image_height, sample_image_width], image_path=sample_dir_2)
# Model save
model.saver.save(s, results['model'], global_step=step)
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close tf.Session
s.close()
if __name__ == '__main__':
main()
roboticstoolbox/examples/mexican-wave.py | tassos/robotics-toolbox-python | 749 | 140234 |
<filename>roboticstoolbox/examples/mexican-wave.py
#!/usr/bin/env python
"""
@author <NAME>
@author <NAME>
"""
# A circle of Puma robots doing a Mexican wave
import numpy as np
from spatialmath import SE3
import roboticstoolbox as rtb
from roboticstoolbox.backends.swift import Swift
import time
swift = Swift()
swift.launch()
puma0 = rtb.models.URDF.Puma560()
pumas = []
num_robots = 15
rotation = 2 * np.pi * ((num_robots - 1) / num_robots)
for theta in np.linspace(0, rotation, num_robots):
base = SE3.Rz(theta) * SE3(2, 0, 0)
# Clone the robot
puma = rtb.ERobot(puma0)
puma.base = base
puma.q = puma0.qz
swift.add(puma)
pumas.append(puma)
# The wave is a Gaussian that moves around the circle
tt = np.linspace(0, num_robots, num_robots * 10)
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.0) / (2 * np.power(sig, 2.0)))
g = gaussian(tt, 5, 1)
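# Each robot samples this same Gaussian profile at an index offset of 10 samples
# from its neighbour, so the bump appears to travel around the circle as t advances.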
t = 0
while True:
for i, puma in enumerate(pumas):
k = (t + i * 10) % len(tt)
puma.q = np.r_[0, g[k], -g[k], 0, 0, 0]
swift.step(0)
time.sleep(0.001)
t += 1
sopaper/queryhandler.py | gonzalorodrigo/SoPaper | 158 | 140250 |
#!../manage/exec-in-virtualenv.sh
# -*- coding: UTF-8 -*-
# File: queryhandler.py
# Date: Thu Jun 18 22:52:39 2015 +0800
# Author: <NAME> <<EMAIL>>
from bson.binary import Binary
from threading import Thread
from multiprocessing import Pool
from ukdbconn import get_mongo, global_counter, new_paper
from uklogger import *
from lib.textutil import title_beautify, parse_file_size
import searcher
from searcher import searcher_run
import fetcher
from job import JobContext
from dbsearch import *
from pdfprocess import postprocess
from lib.downloader import ProgressPrinter
from contentsearch import SoPaperSearcher
import ukconfig
# global. save all ongoing download
progress_dict = {}
class Updater(ProgressPrinter):
def __init__(self, pid):
self.pid = pid
super(Updater, self).__init__()
def update(self, done):
percent = float(done) / self.total
progress_dict[self.pid] = percent
super(Updater, self).update(done)
def start_download(dl_candidates, ctx, pid):
dl_candidates = sorted(dl_candidates, key=lambda x: x[0].priority,
reverse=True)
updater = Updater(pid)
for (parser, sr) in dl_candidates:
data = parser.download(sr, updater)
if data:
db = get_mongo('paper')
try:
db.update({'_id': pid},
{'$set': {
'pdf': Binary(data),
'page_url': sr.url,
'source': parser.name
}})
except:
log_exc("Save pdf data error")
postprocess(data, ctx, pid)
progress_dict.pop(pid, None)
return
progress_dict.pop(pid, None)
def handle_title_query(query):
query = title_beautify(query)
log_info("Get title query: {0}".format(query))
# start the search
res = search_startswith(query) # and the idf is large
if res:
log_info("Found {0} results in db: {1}".format(
len(res), str([x['_id'] for x in res])))
return res
# similar search
res = similar_search(query)
if res:
log_info(u"Found similar results in db: {0}".format(res['_id']))
return [res]
# search on web
searchers = searcher.register_searcher.get_searcher_list()
parsers = fetcher.register_parser.get_parser_list()
ctx = JobContext(query)
args = zip(searchers, [ctx] * len(searchers))
pool = Pool()
async_results = [pool.apply_async(searcher_run, arg) for arg in args]
# Search and get all the results item
all_search_results = []
for s in async_results:
s = s.get(ukconfig.PYTHON_POOL_TIMEOUT)
if s is None:
continue
srs = s['results']
# try search database with updated title
try:
updated_title = s['ctx_update']['title']
except KeyError:
pass
else:
if updated_title != query:
query = updated_title
res = search_exact(query)
if res:
log_info("Found {0} results in db: {1}".format(
len(res), str([x['_id'] for x in res])))
return res
all_search_results.extend(srs)
meta = s.get('ctx_update')
if meta:
log_info('Meta update from searcher: {0}'.format(str(meta.keys())))
ctx.update_meta_dict(meta)
pool.close()
pool.terminate()
# Analyse each result and try to parse info
download_candidates = []
parser_used = set()
found = False
for sr in all_search_results:
for parser in parsers:
if parser.can_handle(sr):
download_candidates.append((parser, sr))
if ctx.need_field(parser.support_meta_field):
# Already tried this fetcher
if not parser.repeatable and \
parser.name in parser_used:
continue
else:
parser_used.add(parser.name)
succ = parser.fetch_info(ctx, sr)
if not succ:
continue
found = True
if ctx.existing is not None:
log_info("Found {0} results in db".format(len(ctx.existing)))
return [ctx.existing]
# no metadata or downloadable source found
if not found and len(download_candidates) == 0:
return None
# Save data, return data and start downloading
try:
pid = new_paper(ctx)
ret = [{'_id': pid,
'title': ctx.title,
'view_cnt': 1,
'download_cnt': 0
}]
ret[0].update(ctx.meta)
progress_dict[pid] = 0.0
if len(download_candidates) > 0:
thread = Thread(target=start_download, args=(download_candidates,
ctx, pid))
thread.start()
return ret
except:
log_exc("Failed to save to db")
sp_searcher = SoPaperSearcher()
def handl_author_query(q):
db = get_mongo('paper')
res = list(db.find({'author': q}, SEARCH_RETURN_FIELDS))
return res
def handle_content_query(query):
log_info("Get content query: {0}".format(query))
res = sp_searcher.search(query)
db = get_mongo('paper')
def transform(r):
pid = long(r['_id'])
# XXX should use find with '$in' and then do the sorting
doc = db.find_one({'_id': pid}, SEARCH_RETURN_FIELDS)
if not doc:
raise Exception("Impossible! Mongo doesn't have this paper in index: {0}".format(pid))
doc['content'] = r['content']
doc['weight'] = r['weight']
return doc
ret = map(transform, res)
return ret
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
res = handle_title_query(sys.argv[1])
sys.exit(0)
#res = handle_title_query('test test test this is not a paper name')
#res = handle_title_query('Intriguing properties of neural networks')
res = handle_content_query('neural networks')
#res = handle_title_query("The WEka data mining software an update")
#res = handle_title_query("linear")
#print res
models/maskrcnn/metric.py | tingyumao94/groupsoftmax-simpledet | 153 | 140268 |
<reponame>tingyumao94/groupsoftmax-simpledet<filename>models/maskrcnn/metric.py<gh_stars>100-1000
import numpy as np
import mxnet as mx
class SigmoidCELossMetric(mx.metric.EvalMetric):
def __init__(self, name, output_names, label_names):
super().__init__(name, output_names, label_names)
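# Assumes preds[0] already holds per-sample sigmoid cross-entropy loss values,
# so update() simply tracks their running mean across batches.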
def update(self, labels, preds):
self.sum_metric += preds[0].mean().asscalar()
self.num_inst += 1
tests/tests_tuner/test_time_controller.py | stjordanis/mljar-supervised | 1,882 | 140270 |
import os
import time
import unittest
from numpy.testing import assert_almost_equal
from supervised.tuner.time_controller import TimeController
class TimeControllerTest(unittest.TestCase):
def test_to_and_from_json(self):
tc = TimeController(
start_time=time.time(),
total_time_limit=10,
model_time_limit=None,
steps=["simple_algorithms"],
algorithms=["Baseline"],
)
tc.log_time("1_Baseline", "Baseline", "simple_algorithms", 123.1)
tc2 = TimeController.from_json(tc.to_json())
assert_almost_equal(tc2.step_spend("simple_algorithms"), 123.1)
assert_almost_equal(tc2.model_spend("Baseline"), 123.1)
def test_enough_time_for_stacking(self):
for t in [5, 10, 20]:
tc = TimeController(
start_time=time.time(),
total_time_limit=100,
model_time_limit=None,
steps=[
"default_algorithms",
"not_so_random",
"golden_features",
"insert_random_feature",
"features_selection",
"hill_climbing_1",
"hill_climbing_3",
"hill_climbing_5",
"ensemble",
"stack",
"ensemble_stacked",
],
algorithms=["Xgboost"],
)
tc.log_time("1_Xgboost", "Xgboost", "default_algorithms", t)
tc.log_time("2_Xgboost", "Xgboost", "not_so_random", t)
tc.log_time("3_Xgboost", "Xgboost", "insert_random_feature", t)
tc.log_time("4_Xgboost", "Xgboost", "features_selection", t)
tc.log_time("5_Xgboost", "Xgboost", "hill_climbing_1", t)
tc.log_time("6_Xgboost", "Xgboost", "hill_climbing_2", t)
tc.log_time("7_Xgboost", "Xgboost", "hill_climbing_3", t)
tc._start_time = time.time() - 7 * t
assert_almost_equal(tc.already_spend(), 7 * t)
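# With total_time_limit=100 and 7 models already trained, enough budget remains
# for stacking only for the smaller step times (t = 5 or 10), not for t = 20.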
if t < 20:
self.assertTrue(tc.enough_time("Xgboost", "stack"))
else:
self.assertFalse(tc.enough_time("Xgboost", "stack"))
self.assertTrue(tc.enough_time("Ensemble_Stacked", "ensemble_stacked"))
readthedocs/metrics/tasks.py | mforbes/readthedocs.org | 4,054 | 140292 |
"""
Specific metric tasks for community.
Override the base metric tasks to add specific ones only required on community.
"""
# Disable import error because -ext is not available on pylint
# pylint: disable=import-error
from readthedocsext.monitoring.metrics.database import (
AvgBuildTimeMetric,
AvgBuildTriggeredAndFirstCommandTimeMetric,
ConcurrencyLimitedBuildsMetric,
RunningBuildsMetric,
)
from readthedocsext.monitoring.metrics.redislen import RedislenMetric
from readthedocsext.monitoring.metrics.latency import BuildLatencyMetric
from readthedocsext.monitoring.metrics.tasks import (
Metrics1mTaskBase,
Metrics5mTaskBase,
)
class CommunityMetrics1mTask(Metrics1mTaskBase):
metrics = Metrics1mTaskBase.metrics + [
RedislenMetric(queue_name='build-large'),
RunningBuildsMetric(builder='large'),
ConcurrencyLimitedBuildsMetric(builder='large'),
]
class CommunityMetrics5mTask(Metrics5mTaskBase):
metrics = Metrics5mTaskBase.metrics + [
AvgBuildTimeMetric(
builder='large',
minutes=Metrics5mTaskBase.interval,
),
AvgBuildTriggeredAndFirstCommandTimeMetric(
builder='large',
minutes=Metrics5mTaskBase.interval,
),
BuildLatencyMetric(
project='time-test',
queue_name='build-large',
version='latency-test-large',
doc='index',
section='Time',
doc_url=None,
api_host='https://readthedocs.org',
webhook_url='https://readthedocs.org/api/v2/webhook/time-test/125903/',
),
]
Allura/allura/tests/test_webhooks.py | rohankumardubey/allura | 113 | 140302 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import hmac
import hashlib
import datetime as dt
from mock import Mock, MagicMock, patch, call
from alluratest.tools import (
assert_raises,
assert_equal,
assert_not_in,
assert_in,
)
from datadiff import tools as dd
from formencode import Invalid
from ming.odm import session
from tg import tmpl_context as c
from tg import config
from allura import model as M
from allura.lib import helpers as h
from allura.webhooks import (
WebhookValidator,
WebhookController,
send_webhook,
RepoPushWebhookSender,
SendWebhookHelper,
)
from allura.tests import decorators as td
from alluratest.controller import (
setup_basic_test,
TestController,
TestRestApiBase,
)
import six
from six.moves import range
# important to be distinct from 'test' and 'test2' which ForgeGit and
# ForgeImporter use, so that the tests can run in parallel and not clobber each
# other
test_project_with_repo = 'adobe-1'
with_git = td.with_tool(test_project_with_repo, 'git', 'src', 'Git')
with_git2 = td.with_tool(test_project_with_repo, 'git', 'src2', 'Git2')
class TestWebhookBase(object):
def setUp(self):
setup_basic_test()
self.patches = self.monkey_patch()
for p in self.patches:
p.start()
self.setup_with_tools()
self.project = M.Project.query.get(shortname=test_project_with_repo)
self.git = self.project.app_instance('src')
self.wh = M.Webhook(
type='repo-push',
app_config_id=self.git.config._id,
hook_url='http://httpbin.org/post',
secret='secret')
session(self.wh).flush(self.wh)
def tearDown(self):
for p in self.patches:
p.stop()
@with_git
def setup_with_tools(self):
pass
def monkey_patch(self):
# we don't need actual repo here, and this avoids test conflicts when
# running in parallel
repo_init = patch.object(M.Repository, 'init', autospec=True)
return [repo_init]
class TestValidators(TestWebhookBase):
@with_git2
def test_webhook_validator(self):
sender = Mock(type='repo-push')
app = self.git
invalid_app = self.project.app_instance('src2')
v = WebhookValidator(sender=sender, app=app, not_empty=True)
with assert_raises(Invalid) as cm:
v.to_python(None)
assert_equal(cm.exception.msg, 'Please enter a value')
with assert_raises(Invalid) as cm:
v.to_python('invalid id')
assert_equal(cm.exception.msg, 'Invalid webhook')
wh = M.Webhook(type='invalid type',
app_config_id=invalid_app.config._id,
hook_url='http://hooks.slack.com',
secret='secret')
session(wh).flush(wh)
# invalid type
with assert_raises(Invalid) as cm:
v.to_python(wh._id)
assert_equal(cm.exception.msg, 'Invalid webhook')
wh.type = 'repo-push'
session(wh).flush(wh)
# invalid app
with assert_raises(Invalid) as cm:
v.to_python(wh._id)
assert_equal(cm.exception.msg, 'Invalid webhook')
wh.app_config_id = app.config._id
session(wh).flush(wh)
assert_equal(v.to_python(wh._id), wh)
assert_equal(v.to_python(six.text_type(wh._id)), wh)
class TestWebhookController(TestController):
def setUp(self):
super(TestWebhookController, self).setUp()
self.patches = self.monkey_patch()
for p in self.patches:
p.start()
self.setup_with_tools()
self.project = M.Project.query.get(shortname=test_project_with_repo)
self.git = self.project.app_instance('src')
self.url = str(self.git.admin_url + 'webhooks')
def tearDown(self):
super(TestWebhookController, self).tearDown()
for p in self.patches:
p.stop()
@with_git
def setup_with_tools(self):
pass
def monkey_patch(self):
gen_secret = patch.object(
WebhookController,
'gen_secret',
return_value='super-secret',
autospec=True)
# we don't need actual repo here, and this avoids test conflicts when
# running in parallel
repo_init = patch.object(M.Repository, 'init', autospec=True)
return [gen_secret, repo_init]
def create_webhook(self, data, url=None):
url = url or self.url
r = self.app.post(url + '/repo-push/create', data)
wf = json.loads(self.webflash(r))
assert_equal(wf['status'], 'ok')
assert_equal(wf['message'], 'Created successfully')
return r
def find_error(self, r, field, msg, form_type='create'):
form = r.html.find('form', attrs={'action': form_type})
if field == '_the_form':
error = form.findPrevious('div', attrs={'class': 'error'})
else:
error = form.find('input', attrs={'name': field})
error = error.findNext('div', attrs={'class': 'error'})
if error:
assert_in(msg, error.getText())
else:
assert False, 'Validation error not found'
def test_access(self):
self.app.get(self.url + '/repo-push/')
self.app.get(self.url + '/repo-push/',
extra_environ={'username': str('test-user')},
status=403)
r = self.app.get(self.url + '/repo-push/',
extra_environ={'username': str('*anonymous')},
status=302)
assert_equal(r.location,
'http://localhost/auth/'
'?return_to=%2Fadobe%2Fadobe-1%2Fadmin%2Fsrc%2Fwebhooks%2Frepo-push%2F')
def test_invalid_hook_type(self):
self.app.get(self.url + '/invalid-hook-type/', status=404)
def test_create(self):
assert_equal(M.Webhook.query.find().count(), 0)
r = self.app.get(self.url)
assert_in('<h1>repo-push</h1>', r)
assert_not_in('http://httpbin.org/post', r)
data = {'url': 'http://httpbin.org/post',
'secret': ''}
msg = 'add webhook repo-push {} {}'.format(
data['url'], self.git.config.url())
with td.audits(msg):
r = self.create_webhook(data).follow()
assert_in('http://httpbin.org/post', r)
hooks = M.Webhook.query.find().all()
assert_equal(len(hooks), 1)
assert_equal(hooks[0].type, 'repo-push')
assert_equal(hooks[0].hook_url, 'http://httpbin.org/post')
assert_equal(hooks[0].app_config_id, self.git.config._id)
assert_equal(hooks[0].secret, 'super-secret')
# Try to create duplicate
with td.out_audits(msg):
r = self.app.post(self.url + '/repo-push/create', data)
self.find_error(r, '_the_form',
'"repo-push" webhook already exists for Git http://httpbin.org/post')
assert_equal(M.Webhook.query.find().count(), 1)
def test_create_limit_reached(self):
assert_equal(M.Webhook.query.find().count(), 0)
limit = json.dumps({'git': 1})
with h.push_config(config, **{'webhook.repo_push.max_hooks': limit}):
data = {'url': 'http://httpbin.org/post',
'secret': ''}
r = self.create_webhook(data).follow()
assert_equal(M.Webhook.query.find().count(), 1)
r = self.app.post(self.url + '/repo-push/create', data)
wf = json.loads(self.webflash(r))
assert_equal(wf['status'], 'error')
assert_equal(
wf['message'],
'You have exceeded the maximum number of webhooks '
'you are allowed to create for this project/app')
assert_equal(M.Webhook.query.find().count(), 1)
def test_create_validation(self):
assert_equal(M.Webhook.query.find().count(), 0)
r = self.app.post(
self.url + '/repo-push/create', {}, status=404)
data = {'url': '', 'secret': ''}
r = self.app.post(self.url + '/repo-push/create', data)
self.find_error(r, 'url', 'Please enter a value')
data = {'url': 'qwer', 'secret': 'qwe'}
r = self.app.post(self.url + '/repo-push/create', data)
self.find_error(r, 'url',
'You must provide a full domain name (like qwer.com)')
def test_AAAA_WORKAROUND__edit(self):
"""
This must run first in this test class for unknown reasons ever since
https://github.com/TurboGears/tg2/commit/02fb49b14e70fdd8ac16973488fb3637e5e59114
If any test runs the self.app.post from create_webhook before this one, then this test will fail on:
with td.audits(msg):
r = form.submit()
because WebhookValidator's `value` will be "create" instead of an objectid str
Maybe something to do with WebhookControllerMeta setup of `validate` decorators?
"""
data1 = {'url': 'http://httpbin.org/post',
'secret': 'secret'}
data2 = {'url': 'http://example.com/hook',
'secret': 'secret2'}
self.create_webhook(data1).follow()
self.create_webhook(data2).follow()
assert_equal(M.Webhook.query.find().count(), 2)
wh1 = M.Webhook.query.get(hook_url=data1['url'])
r = self.app.get(self.url + '/repo-push/%s' % wh1._id)
form = r.forms[0]
assert_equal(form['url'].value, data1['url'])
assert_equal(form['secret'].value, data1['secret'])
assert_equal(form['webhook'].value, six.text_type(wh1._id))
form['url'] = 'http://host.org/hook'
form['secret'] = 'new secret'
msg = 'edit webhook repo-push\n{} => {}\n{}'.format(
data1['url'], form['url'].value, 'secret changed')
with td.audits(msg):
r = form.submit()
wf = json.loads(self.webflash(r))
assert_equal(wf['status'], 'ok')
assert_equal(wf['message'], 'Edited successfully')
assert_equal(M.Webhook.query.find().count(), 2)
wh1 = M.Webhook.query.get(_id=wh1._id)
assert_equal(wh1.hook_url, 'http://host.org/hook')
assert_equal(wh1.app_config_id, self.git.config._id)
assert_equal(wh1.secret, 'new secret')
assert_equal(wh1.type, 'repo-push')
# Duplicates
r = self.app.get(self.url + '/repo-push/%s' % wh1._id)
form = r.forms[0]
form['url'] = data2['url']
r = form.submit()
self.find_error(r, '_the_form',
'"repo-push" webhook already exists for Git http://example.com/hook',
form_type='edit')
def test_edit_validation(self):
invalid = M.Webhook(
type='invalid type',
app_config_id=None,
hook_url='http://httpbin.org/post',
secret='secret')
session(invalid).flush(invalid)
self.app.get(self.url + '/repo-push/%s' % invalid._id, status=404)
data = {'url': 'http://httpbin.org/post',
'secret': 'secret'}
self.create_webhook(data).follow()
wh = M.Webhook.query.get(hook_url=data['url'], type='repo-push')
# invalid id in hidden field, just in case
r = self.app.get(self.url + '/repo-push/%s' % wh._id)
data = {k: v[0].value for (k, v) in r.forms[0].fields.items() if k}
data['webhook'] = six.text_type(invalid._id)
self.app.post(self.url + '/repo-push/edit', data, status=404)
# empty values
data = {'url': '', 'secret': '', 'webhook': str(wh._id)}
r = self.app.post(self.url + '/repo-push/edit', data)
self.find_error(r, 'url', 'Please enter a value', 'edit')
data = {'url': 'qwe', 'secret': 'qwe', 'webhook': str(wh._id)}
r = self.app.post(self.url + '/repo-push/edit', data)
self.find_error(r, 'url',
'You must provide a full domain name (like qwe.com)', 'edit')
def test_delete(self):
data = {'url': 'http://httpbin.org/post',
'secret': 'secret'}
self.create_webhook(data).follow()
assert_equal(M.Webhook.query.find().count(), 1)
wh = M.Webhook.query.get(hook_url=data['url'])
data = {'webhook': six.text_type(wh._id)}
msg = 'delete webhook repo-push {} {}'.format(
wh.hook_url, self.git.config.url())
with td.audits(msg):
r = self.app.post(self.url + '/repo-push/delete', data)
assert_equal(r.json, {'status': 'ok'})
assert_equal(M.Webhook.query.find().count(), 0)
def test_delete_validation(self):
invalid = M.Webhook(
type='invalid type',
app_config_id=None,
hook_url='http://httpbin.org/post',
secret='secret')
session(invalid).flush(invalid)
assert_equal(M.Webhook.query.find().count(), 1)
data = {'webhook': ''}
self.app.post(self.url + '/repo-push/delete', data, status=404)
data = {'webhook': six.text_type(invalid._id)}
self.app.post(self.url + '/repo-push/delete', data, status=404)
assert_equal(M.Webhook.query.find().count(), 1)
@with_git2
def test_list_webhooks(self):
git2 = self.project.app_instance('src2')
url2 = str(git2.admin_url + 'webhooks')
data1 = {'url': 'http://httpbin.org/post',
'secret': 'secret'}
data2 = {'url': 'http://another-host.org/',
'secret': 'secret2'}
data3 = {'url': 'http://another-app.org/',
'secret': 'secret3'}
self.create_webhook(data1).follow()
self.create_webhook(data2).follow()
self.create_webhook(data3, url=url2).follow()
wh1 = M.Webhook.query.get(hook_url=data1['url'])
wh2 = M.Webhook.query.get(hook_url=data2['url'])
r = self.app.get(self.url)
assert_in('<h1>repo-push</h1>', r)
rows = r.html.find('table').findAll('tr')
assert_equal(len(rows), 2)
rows = sorted([self._format_row(row) for row in rows], key=lambda rows: rows[0]['text'])
expected_rows = sorted([
[{'text': wh1.hook_url},
{'text': wh1.secret},
{'href': self.url + '/repo-push/' + str(wh1._id),
'text': 'Edit'},
{'href': self.url + '/repo-push/delete',
'data-id': str(wh1._id)}],
[{'text': wh2.hook_url},
{'text': wh2.secret},
{'href': self.url + '/repo-push/' + str(wh2._id),
'text': 'Edit'},
{'href': self.url + '/repo-push/delete',
'data-id': str(wh2._id)}],
], key=lambda rows: rows[0]['text'])
assert_equal(rows, expected_rows)
# make sure webhooks for another app are not visible
assert_not_in('http://another-app.org/', r)
assert_not_in('secret3', r)
def _format_row(self, row):
def link(td):
a = td.find('a')
return {'href': a.get('href'), 'text': a.getText()}
def text(td):
return {'text': td.getText()}
def delete_btn(td):
a = td.find('a')
return {'href': a.get('href'), 'data-id': a.get('data-id')}
tds = row.findAll('td')
return [text(tds[0]), text(tds[1]), link(tds[2]), delete_btn(tds[3])]
class TestSendWebhookHelper(TestWebhookBase):
def setUp(self, *args, **kw):
super(TestSendWebhookHelper, self).setUp(*args, **kw)
self.payload = {'some': ['data', 23]}
self.h = SendWebhookHelper(self.wh, self.payload)
def test_timeout(self):
assert_equal(self.h.timeout, 30)
with h.push_config(config, **{'webhook.timeout': 10}):
assert_equal(self.h.timeout, 10)
def test_retries(self):
assert_equal(self.h.retries, [60, 120, 240])
with h.push_config(config, **{'webhook.retry': '1 2 3 4 5 6'}):
assert_equal(self.h.retries, [1, 2, 3, 4, 5, 6])
def test_sign(self):
json_payload = json.dumps(self.payload)
signature = hmac.new(
self.wh.secret.encode('utf-8'),
json_payload.encode('utf-8'),
hashlib.sha1)
signature = 'sha1=' + signature.hexdigest()
assert_equal(self.h.sign(json_payload), signature)
def test_log_msg(self):
assert_equal(
self.h.log_msg('OK'),
'OK: repo-push http://httpbin.org/post /adobe/adobe-1/src/')
response = Mock(
status_code=500,
text='that is why',
headers={str('Content-Type'): str('application/json')})
assert_equal(
self.h.log_msg('Error', response=response),
"Error: repo-push http://httpbin.org/post /adobe/adobe-1/src/ 500 "
"that is why {'Content-Type': 'application/json'}")
@patch('allura.webhooks.SendWebhookHelper', autospec=True)
def test_send_webhook_task(self, swh):
send_webhook(self.wh._id, self.payload)
swh.assert_called_once_with(self.wh, self.payload)
@patch('allura.webhooks.requests', autospec=True)
@patch('allura.webhooks.log', autospec=True)
def test_send(self, log, requests):
requests.post.return_value = Mock(status_code=200)
self.h.sign = Mock(return_value='sha1=abc')
self.h.send()
headers = {'content-type': 'application/json',
'User-Agent': 'Allura Webhook (https://allura.apache.org/)',
'X-Allura-Signature': 'sha1=abc'}
requests.post.assert_called_once_with(
self.wh.hook_url,
data=json.dumps(self.payload),
headers=headers,
timeout=30)
log.info.assert_called_once_with(
'Webhook successfully sent: %s %s %s' % (
self.wh.type, self.wh.hook_url, self.wh.app_config.url()))
@patch('allura.webhooks.time', autospec=True)
@patch('allura.webhooks.requests', autospec=True)
@patch('allura.webhooks.log', autospec=True)
def test_send_error_response_status(self, log, requests, time):
requests.post.return_value = Mock(status_code=500)
self.h.send()
assert_equal(requests.post.call_count, 4) # initial call + 3 retries
assert_equal(time.sleep.call_args_list,
[call(60), call(120), call(240)])
assert_equal(log.info.call_args_list, [
call('Retrying webhook in: %s', [60, 120, 240]),
call('Retrying webhook in %s seconds', 60),
call('Retrying webhook in %s seconds', 120),
call('Retrying webhook in %s seconds', 240)])
assert_equal(log.error.call_count, 4)
log.error.assert_called_with(
'Webhook send error: %s %s %s %s %s %s' % (
self.wh.type, self.wh.hook_url,
self.wh.app_config.url(),
requests.post.return_value.status_code,
requests.post.return_value.text,
requests.post.return_value.headers))
@patch('allura.webhooks.time', autospec=True)
@patch('allura.webhooks.requests', autospec=True)
@patch('allura.webhooks.log', autospec=True)
def test_send_error_no_retries(self, log, requests, time):
requests.post.return_value = Mock(status_code=500)
with h.push_config(config, **{'webhook.retry': ''}):
self.h.send()
assert_equal(requests.post.call_count, 1)
assert_equal(time.call_count, 0)
log.info.assert_called_once_with('Retrying webhook in: %s', [])
assert_equal(log.error.call_count, 1)
log.error.assert_called_with(
'Webhook send error: %s %s %s %s %s %s' % (
self.wh.type, self.wh.hook_url,
self.wh.app_config.url(),
requests.post.return_value.status_code,
requests.post.return_value.text,
requests.post.return_value.headers))
class TestRepoPushWebhookSender(TestWebhookBase):
@patch('allura.webhooks.send_webhook', autospec=True)
def test_send(self, send_webhook):
sender = RepoPushWebhookSender()
sender.get_payload = Mock()
with h.push_config(c, app=self.git):
sender.send(dict(arg1=1, arg2=2))
send_webhook.post.assert_called_once_with(
self.wh._id,
sender.get_payload.return_value)
@patch('allura.webhooks.send_webhook', autospec=True)
def test_send_with_list(self, send_webhook):
sender = RepoPushWebhookSender()
sender.get_payload = Mock(side_effect=[1, 2])
self.wh.enforce_limit = Mock(return_value=True)
with h.push_config(c, app=self.git):
sender.send([dict(arg1=1, arg2=2), dict(arg1=3, arg2=4)])
assert_equal(send_webhook.post.call_count, 2)
assert_equal(send_webhook.post.call_args_list,
[call(self.wh._id, 1), call(self.wh._id, 2)])
assert_equal(self.wh.enforce_limit.call_count, 1)
@patch('allura.webhooks.log', autospec=True)
@patch('allura.webhooks.send_webhook', autospec=True)
def test_send_limit_reached(self, send_webhook, log):
sender = RepoPushWebhookSender()
sender.get_payload = Mock()
self.wh.enforce_limit = Mock(return_value=False)
with h.push_config(c, app=self.git):
sender.send(dict(arg1=1, arg2=2))
assert_equal(send_webhook.post.call_count, 0)
log.warn.assert_called_once_with(
'Webhook fires too often: %s. Skipping', self.wh)
@patch('allura.webhooks.send_webhook', autospec=True)
def test_send_no_configured_webhooks(self, send_webhook):
self.wh.delete()
session(self.wh).flush(self.wh)
sender = RepoPushWebhookSender()
with h.push_config(c, app=self.git):
sender.send(dict(arg1=1, arg2=2))
assert_equal(send_webhook.post.call_count, 0)
def test_get_payload(self):
sender = RepoPushWebhookSender()
_ci = lambda x: MagicMock(webhook_info={'id': str(x)}, parent_ids=['0'])
with patch.object(self.git.repo, 'commit', new=_ci):
with h.push_config(c, app=self.git):
result = sender.get_payload(commit_ids=['1', '2', '3'], ref='ref')
expected_result = {
'size': 3,
'commits': [{'id': '1'}, {'id': '2'}, {'id': '3'}],
'ref': 'ref',
'after': '1',
'before': '0',
'repository': {
'full_name': '/adobe/adobe-1/src/',
'name': 'Git',
'url': 'http://localhost/adobe/adobe-1/src/',
},
}
assert_equal(result, expected_result)
def test_enforce_limit(self):
def add_webhooks(suffix, n):
for i in range(n):
webhook = M.Webhook(
type='repo-push',
app_config_id=self.git.config._id,
hook_url='http://httpbin.org/{}/{}'.format(suffix, i),
secret='secret')
session(webhook).flush(webhook)
sender = RepoPushWebhookSender()
# default
assert_equal(sender.enforce_limit(self.git), True)
add_webhooks('one', 3)
assert_equal(sender.enforce_limit(self.git), False)
# config
limit = json.dumps({'git': 5})
with h.push_config(config, **{'webhook.repo_push.max_hooks': limit}):
assert_equal(sender.enforce_limit(self.git), True)
add_webhooks('two', 3)
assert_equal(sender.enforce_limit(self.git), False)
def test_before(self):
sender = RepoPushWebhookSender()
with patch.object(self.git.repo, 'commit', autospec=True) as _ci:
assert_equal(sender._before(self.git.repo, ['3', '2', '1']), '')
_ci.return_value.parent_ids = ['0']
assert_equal(sender._before(self.git.repo, ['3', '2', '1']), '0')
def test_after(self):
sender = RepoPushWebhookSender()
assert_equal(sender._after([]), '')
assert_equal(sender._after(['3', '2', '1']), '3')
def test_convert_id(self):
sender = RepoPushWebhookSender()
assert_equal(sender._convert_id(''), '')
assert_equal(sender._convert_id('a433fa9'), 'a433fa9')
assert_equal(sender._convert_id('a433fa9:13'), 'r13')
class TestModels(TestWebhookBase):
def test_webhook_url(self):
assert_equal(self.wh.url(),
'/adobe/adobe-1/admin/src/webhooks/repo-push/{}'.format(self.wh._id))
def test_webhook_enforce_limit(self):
self.wh.last_sent = None
assert_equal(self.wh.enforce_limit(), True)
# default value
self.wh.last_sent = dt.datetime.utcnow() - dt.timedelta(seconds=31)
assert_equal(self.wh.enforce_limit(), True)
self.wh.last_sent = dt.datetime.utcnow() - dt.timedelta(seconds=15)
assert_equal(self.wh.enforce_limit(), False)
# value from config
with h.push_config(config, **{'webhook.repo_push.limit': 100}):
self.wh.last_sent = dt.datetime.utcnow() - dt.timedelta(seconds=101)
assert_equal(self.wh.enforce_limit(), True)
self.wh.last_sent = dt.datetime.utcnow() - dt.timedelta(seconds=35)
assert_equal(self.wh.enforce_limit(), False)
@patch('allura.model.webhook.dt', autospec=True)
def test_update_limit(self, dt_mock):
_now = dt.datetime(2015, 2, 2, 13, 39)
dt_mock.datetime.utcnow.return_value = _now
assert_equal(self.wh.last_sent, None)
self.wh.update_limit()
session(self.wh).expunge(self.wh)
assert_equal(M.Webhook.query.get(_id=self.wh._id).last_sent, _now)
def test_json(self):
expected = {
'_id': six.text_type(self.wh._id),
'url': 'http://localhost/rest/adobe/adobe-1/admin'
'/src/webhooks/repo-push/{}'.format(self.wh._id),
'type': 'repo-push',
'hook_url': 'http://httpbin.org/post',
'mod_date': self.wh.mod_date,
}
dd.assert_equal(self.wh.__json__(), expected)
class TestWebhookRestController(TestRestApiBase):
def setUp(self):
super(TestWebhookRestController, self).setUp()
self.patches = self.monkey_patch()
for p in self.patches:
p.start()
self.setup_with_tools()
self.project = M.Project.query.get(shortname=test_project_with_repo)
self.git = self.project.app_instance('src')
self.url = str('/rest' + self.git.admin_url + 'webhooks')
self.webhooks = []
for i in range(3):
webhook = M.Webhook(
type='repo-push',
app_config_id=self.git.config._id,
hook_url='http://httpbin.org/post/{}'.format(i),
secret='secret-{}'.format(i))
session(webhook).flush(webhook)
self.webhooks.append(webhook)
def tearDown(self):
super(TestWebhookRestController, self).tearDown()
for p in self.patches:
p.stop()
@with_git
def setup_with_tools(self):
pass
def monkey_patch(self):
gen_secret = patch.object(
WebhookController,
'gen_secret',
return_value='super-secret',
autospec=True)
# we don't need actual repo here, and this avoids test conflicts when
# running in parallel
repo_init = patch.object(M.Repository, 'init', autospec=True)
return [gen_secret, repo_init]
def test_webhooks_list(self):
r = self.api_get(self.url)
webhooks = [{
'_id': six.text_type(wh._id),
'url': 'http://localhost/rest/adobe/adobe-1/admin'
'/src/webhooks/repo-push/{}'.format(wh._id),
'type': 'repo-push',
'hook_url': 'http://httpbin.org/post/{}'.format(n),
'mod_date': six.text_type(wh.mod_date),
} for n, wh in enumerate(self.webhooks)]
expected = {
'webhooks': webhooks,
'limits': {'repo-push': {'max': 3, 'used': 3}},
}
dd.assert_equal(r.json, expected)
def test_webhook_GET_404(self):
r = self.api_get(self.url + '/repo-push/invalid', status=404)
def test_webhook_GET(self):
webhook = self.webhooks[0]
r = self.api_get('{}/repo-push/{}'.format(self.url, webhook._id))
expected = {
'_id': six.text_type(webhook._id),
'url': 'http://localhost/rest/adobe/adobe-1/admin'
'/src/webhooks/repo-push/{}'.format(webhook._id),
'type': 'repo-push',
'hook_url': 'http://httpbin.org/post/0',
'mod_date': six.text_type(webhook.mod_date),
}
dd.assert_equal(r.status_int, 200)
dd.assert_equal(r.json, expected)
def test_create_validation(self):
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
r = self.api_get(self.url + '/repo-push', status=405)
r = self.api_post(self.url + '/repo-push', status=400)
expected = {
'result': 'error',
'error': {'url': 'Please enter a value'},
}
assert_equal(r.json, expected)
data = {'url': 'qwer', 'secret': 'qwe'}
r = self.api_post(self.url + '/repo-push', status=400, **data)
expected = {
'result': 'error',
'error': {
'url': 'You must provide a full domain name (like qwer.com)'
},
}
assert_equal(r.json, expected)
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
def test_create(self):
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
data = {'url': 'http://hook.slack.com/abcd'}
limit = json.dumps({'git': 10})
with h.push_config(config, **{'webhook.repo_push.max_hooks': limit}):
msg = 'add webhook repo-push {} {}'.format(
data['url'], self.git.config.url())
with td.audits(msg):
r = self.api_post(self.url + '/repo-push', status=201, **data)
webhook = M.Webhook.query.get(hook_url=data['url'])
assert_equal(webhook.secret, 'super-secret') # secret generated
expected = {
'_id': six.text_type(webhook._id),
'url': 'http://localhost/rest/adobe/adobe-1/admin'
'/src/webhooks/repo-push/{}'.format(webhook._id),
'type': 'repo-push',
'hook_url': data['url'],
'mod_date': six.text_type(webhook.mod_date),
}
dd.assert_equal(r.json, expected)
assert_equal(M.Webhook.query.find().count(), len(self.webhooks) + 1)
def test_create_duplicates(self):
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
data = {'url': self.webhooks[0].hook_url}
limit = json.dumps({'git': 10})
with h.push_config(config, **{'webhook.repo_push.max_hooks': limit}):
r = self.api_post(self.url + '/repo-push', status=400, **data)
expected = {'result': 'error',
'error': '_the_form: "repo-push" webhook already '
'exists for Git http://httpbin.org/post/0'}
assert_equal(r.json, expected)
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
def test_create_limit_reached(self):
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
data = {'url': 'http://hook.slack.com/abcd'}
r = self.api_post(self.url + '/repo-push', status=400, **data)
expected = {
'result': 'error',
'limits': {'max': 3, 'used': 3},
'error': 'You have exceeded the maximum number of webhooks '
'you are allowed to create for this project/app'}
assert_equal(r.json, expected)
assert_equal(M.Webhook.query.find().count(), len(self.webhooks))
def test_edit_validation(self):
webhook = self.webhooks[0]
url = '{}/repo-push/{}'.format(self.url, webhook._id)
data = {'url': 'qwe', 'secret': 'qwe'}
r = self.api_post(url, status=400, **data)
expected = {
'result': 'error',
'error': {
'url': 'You must provide a full domain name (like qwe.com)'
},
}
assert_equal(r.json, expected)
def test_edit(self):
webhook = self.webhooks[0]
url = '{}/repo-push/{}'.format(self.url, webhook._id)
# change only url
data = {'url': 'http://hook.slack.com/abcd'}
msg = ('edit webhook repo-push\n'
'http://httpbin.org/post/0 => http://hook.slack.com/abcd\n')
with td.audits(msg):
r = self.api_post(url, status=200, **data)
webhook = M.Webhook.query.get(_id=webhook._id)
assert_equal(webhook.hook_url, data['url'])
assert_equal(webhook.secret, 'secret-0')
expected = {
'_id': six.text_type(webhook._id),
'url': 'http://localhost/rest/adobe/adobe-1/admin'
'/src/webhooks/repo-push/{}'.format(webhook._id),
'type': 'repo-push',
'hook_url': data['url'],
'mod_date': six.text_type(webhook.mod_date),
}
dd.assert_equal(r.json, expected)
# change only secret
data = {'secret': 'new-secret'}
msg = ('edit webhook repo-push\n'
'http://hook.slack.com/abcd => http://hook.slack.com/abcd\n'
'secret changed')
with td.audits(msg):
r = self.api_post(url, status=200, **data)
webhook = M.Webhook.query.get(_id=webhook._id)
assert_equal(webhook.hook_url, 'http://hook.slack.com/abcd')
assert_equal(webhook.secret, 'new-secret')
expected = {
'_id': six.text_type(webhook._id),
'url': 'http://localhost/rest/adobe/adobe-1/admin'
'/src/webhooks/repo-push/{}'.format(webhook._id),
'type': 'repo-push',
'hook_url': 'http://hook.slack.com/abcd',
'mod_date': six.text_type(webhook.mod_date),
}
dd.assert_equal(r.json, expected)
def test_edit_duplicates(self):
webhook = self.webhooks[0]
url = '{}/repo-push/{}'.format(self.url, webhook._id)
data = {'url': 'http://httpbin.org/post/1'}
r = self.api_post(url, status=400, **data)
expected = {'result': 'error',
'error': '_the_form: "repo-push" webhook already '
'exists for Git http://httpbin.org/post/1'}
assert_equal(r.json, expected)
def test_delete_validation(self):
url = '{}/repo-push/invalid'.format(self.url)
self.api_delete(url, status=404)
def test_delete(self):
assert_equal(M.Webhook.query.find().count(), 3)
webhook = self.webhooks[0]
url = '{}/repo-push/{}'.format(self.url, webhook._id)
msg = 'delete webhook repo-push {} {}'.format(
webhook.hook_url, self.git.config.url())
with td.audits(msg):
r = self.api_delete(url, status=200)
dd.assert_equal(r.json, {'result': 'ok'})
assert_equal(M.Webhook.query.find().count(), 2)
assert_equal(M.Webhook.query.get(_id=webhook._id), None)
def test_permissions(self):
self.api_get(self.url, user='test-user', status=403)
self.api_get(self.url, user='*anonymous', status=401)
url = self.url + '/repo-push/'
self.api_post(url, user='test-user', status=403)
self.api_post(url, user='*anonymous', status=401)
url = self.url + '/repo-push/' + str(self.webhooks[0]._id)
self.api_get(url, user='test-user', status=403)
self.api_get(url, user='*anonymous', status=401)
self.api_post(url, user='test-user', status=403)
self.api_post(url, user='*anonymous', status=401)
self.api_delete(url, user='test-user', status=403)
self.api_delete(url, user='*anonymous', status=401)
tests/test_task_scale_images.py | asmeurer/nikola | 1,901 | 140304 |
<filename>tests/test_task_scale_images.py
import os
from tempfile import NamedTemporaryFile
import pytest
from PIL import Image, ImageDraw
from nikola.plugins.task import scale_images
# These tests don't require valid profiles. They need only to verify
# that profile data is/isn't saved with images.
# It would be nice to use PIL.ImageCms to create valid profiles, but
# in many Pillow distributions ImageCms is a stub.
# ICC file data format specification:
# http://www.color.org/icc32.pdf
PROFILE = b"invalid profile data"
def test_handling_icc_profiles(test_images, destination_dir):
filename, expected_profile = test_images
pathname = os.path.join(str(destination_dir), filename)
assert os.path.exists(pathname), pathname
img = Image.open(pathname)
actual_profile = img.info.get("icc_profile")
assert actual_profile == expected_profile
@pytest.fixture(
params=[
pytest.param(True, id="with icc filename"),
pytest.param(False, id="without icc filename"),
]
)
def test_images(request, preserve_icc_profiles, source_dir, site):
image_filename = create_src_image(str(source_dir), request.param)
run_task(site)
if request.param:
yield image_filename, PROFILE if preserve_icc_profiles else None
else:
yield image_filename, None
@pytest.fixture(
params=[
pytest.param(True, id="profiles preserved"),
pytest.param(False, id="profiles not preserved"),
]
)
def preserve_icc_profiles(request):
return request.param
@pytest.fixture
def source_dir(tmpdir_factory):
return tmpdir_factory.mktemp("image_source")
@pytest.fixture
def site(preserve_icc_profiles, source_dir, destination_dir):
config = {
"IMAGE_FOLDERS": {str(source_dir): ""},
"OUTPUT_FOLDER": str(destination_dir),
"IMAGE_THUMBNAIL_SIZE": 128,
"IMAGE_THUMBNAIL_FORMAT": "{name}.thumbnail{ext}",
"MAX_IMAGE_SIZE": 512,
"FILTERS": {},
"PRESERVE_EXIF_DATA": False,
"EXIF_WHITELIST": {},
"PRESERVE_ICC_PROFILES": preserve_icc_profiles,
}
return FakeSite(config)
class FakeSite:
def __init__(self, config):
self.config = config
self.debug = True
@pytest.fixture
def destination_dir(tmpdir_factory):
return tmpdir_factory.mktemp("image_output")
def run_task(site):
task_instance = get_task_instance(site)
for task in task_instance.gen_tasks():
for action, args in task.get("actions", []):
action(*args)
def get_task_instance(site):
result = scale_images.ScaleImage()
result.set_site(site)
return result
def create_src_image(testdir, use_icc_profile):
img = create_test_image()
pathname = tmp_img_name(testdir)
# Test two variants: with and without an associated icc_profile
if use_icc_profile:
img.save(pathname, icc_profile=PROFILE)
else:
img.save(pathname)
return os.path.basename(pathname)
def create_test_image():
# Make a white image with a red stripe on the diagonal.
width = 64
height = 64
img = Image.new("RGB", (width, height), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw.line((0, 0, width, height), fill=(255, 128, 128))
draw.line((width, 0, 0, height), fill=(128, 128, 255))
return img
def tmp_img_name(dirname):
pathname = NamedTemporaryFile(suffix=".jpg", dir=dirname, delete=False)
return pathname.name
bdd_data/label2det_v1.py | Erotemic/bdd-data | 131 | 140324 |
<reponame>Erotemic/bdd-data
import argparse
import json
import os
from os import path as osp
import sys
def parse_args():
"""Use argparse to get command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('label_dir', help='path to the label dir')
parser.add_argument('det_path', help='path to output detection file')
args = parser.parse_args()
return args
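# Example invocation (paths are illustrative only):
#   python bdd_data/label2det_v1.py /path/to/bdd/labels /path/to/output/detections.json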
def label2det(label):
boxes = list()
for frame in label['frames']:
for obj in frame['objects']:
if 'box2d' not in obj:
continue
xy = obj['box2d']
if xy['x1'] >= xy['x2'] or xy['y1'] >= xy['y2']:
continue
box = {'name': label['name'],
'timestamp': frame['timestamp'],
'category': obj['category'],
'bbox': [xy['x1'], xy['y1'], xy['x2'], xy['y2']],
'score': 1}
boxes.append(box)
return boxes
def change_dir(label_dir, det_path):
if not osp.exists(label_dir):
print('Can not find', label_dir)
return
print('Processing', label_dir)
input_names = [n for n in os.listdir(label_dir)
if osp.splitext(n)[1] == '.json']
boxes = []
count = 0
for name in input_names:
in_path = osp.join(label_dir, name)
out = label2det(json.load(open(in_path, 'r')))
boxes.extend(out)
count += 1
if count % 1000 == 0:
print('Finished', count)
with open(det_path, 'w') as fp:
json.dump(boxes, fp, indent=4, separators=(',', ': '))
def main():
args = parse_args()
change_dir(args.label_dir, args.det_path)
if __name__ == '__main__':
main()
Tools/MonoGenerator/__main__.py | mortend/fuse-studio | 324 | 140352 |
<gh_stars>100-1000
import subprocess
import argparse
import os
import urllib
import tarfile
import glob
import shutil
from os import path
from install_name_tool import *
import re
containing_dir = path.dirname(path.realpath(__file__))
working_dir = os.getcwd()
parser = argparse.ArgumentParser(description='\
Generates a minified mono package that can be bundled and shipped. \
By default it uses the currently installed mono as the base for the minification. \
Use `--mono-version` to pick a specific version, or `--custom-mono` to point at a custom build.')
parser.add_argument('--mono-version', help='\
A mono version number, that will be used as reference eg. `4.8.1` \
which will be resolved to `/Library/Frameworks/Mono.framework/Versions/4.8.1`.')
parser.add_argument('--custom-mono', help='\
A custom path to mono. NOTE: MONO_VERSION is ignored in that case. \
This path should be the root path to mono. \
Eg. `/Library/Frameworks/Mono.framework/Versions/4.4.1`. This mode is useful in case of a custom mono build.')
parser.add_argument('--libgdiplus-location', help="\
Specify the location of libgdiplus (by default found in [MONO_ROOT]/lib). This option expects a directory path to where libgdiplus is located. \
Used when building from source, since libgdiplus isn't part of that process by default.")
parser.add_argument('--libs-install-id', default='@rpath/Mono/lib', help="\
Override the install-id that is set for the native libraries. This id is by default '@rpath/Mono/lib'. \
")
parser.add_argument('--clean', default=False, action='store_true', help='Clean up what is left from the last time this command was run.')
parser.add_argument('--no-pack', default=False, action='store_true', help="Don't pack the result with stuff.")
args = parser.parse_args()
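# Example invocations (version numbers and paths below are illustrative only):
#   python Tools/MonoGenerator --mono-version 4.8.1 --clean
#   python Tools/MonoGenerator --custom-mono /Library/Frameworks/Mono.framework/Versions/4.4.1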
if args.custom_mono:
mono_base = args.custom_mono
elif args.mono_version:
mono_base = path.join("/Library/Frameworks/Mono.framework/Versions/", args.mono_version)
else:
mono_base = path.realpath("/Library/Frameworks/Mono.framework/Versions/Current")
mono_base_libdir = path.join(mono_base, 'lib')
if args.libgdiplus_location:
gdipluslib_location = args.libgdiplus_location
else:
gdipluslib_location = mono_base_libdir
install_path_prefix = mono_base
stuff_path = path.join(containing_dir, "..", "..", "Stuff", "stuff")
minimized_output = path.join(working_dir, 'mono-minimized')
def create_dirs_if_required(dir_path):
if not path.exists(dir_path):
os.makedirs(dir_path)
def match_file_recursive(path, extension):
for root, dirnames, filenames in os.walk(path):
files_in_question = [filename for filename in filenames if filename.endswith(extension)]
for filename in files_in_question:
yield os.path.join(root, filename)
dirs_in_question = [dirname for dirname in dirnames if dirname.endswith(extension)]
for dirname in dirs_in_question:
yield os.path.join(root, dirname)
def read_all_text(path):
with open(path, 'r') as content_file:
return content_file.read()
def write_all_text(path, content):
with open(path, 'w') as content_file:
content_file.write(content)
def minimize():
print("# Starting minimization")
bin_folder = path.join(minimized_output, 'bin')
lib_folder = path.join(minimized_output, 'lib')
etc_folder = path.join(minimized_output, 'etc')
create_dirs_if_required(bin_folder)
create_dirs_if_required(lib_folder)
create_dirs_if_required(etc_folder)
print("Copying files")
mono_exe_path = path.join(install_path_prefix, 'bin', 'mono64')
if not path.exists(mono_exe_path):
mono_exe_path = path.join(install_path_prefix, 'bin', 'mono')
shutil.copy(mono_exe_path, path.join(bin_folder, 'mono'))
add_rpath(path.join(bin_folder, 'mono'), '@executable_path/../../')
shutil.copytree(path.join(install_path_prefix, 'lib', 'mono', '4.5'), path.join(lib_folder, 'mono', '4.5'), symlinks=True)
shutil.copytree(path.join(install_path_prefix, 'lib', 'mono', 'gac'), path.join(lib_folder, 'mono', 'gac'), symlinks=True)
shutil.copytree(path.join(install_path_prefix, 'etc', 'mono'), path.join(etc_folder, 'mono'), symlinks=True)
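    # Rewrite mono's config so native library references resolve inside the bundle:
    # '$mono_libdir' placeholders become the configured install id (default '@rpath/Mono/lib'),
    # and absolute references into the source mono lib dir are dropped.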
mono_config = read_all_text(path.join(etc_folder, 'mono', 'config'))
mono_config = re.sub("\$mono_libdir", args.libs_install_id, mono_config)
mono_config = re.sub(re.escape(mono_base_libdir) + "/*", "", mono_config)
write_all_text(path.join(etc_folder, 'mono', 'config'), mono_config)
create_dirs_if_required(path.join(etc_folder, 'fonts'))
shutil.copy(path.join(containing_dir, 'fonts.conf'), path.join(etc_folder, 'fonts'))
    print("Removing unnecessary files such as: *.exe, *.mdb, *.dSYM")
for f in match_file_recursive(minimized_output, ".exe"):
os.remove(f)
for f in match_file_recursive(minimized_output, ".mdb"):
os.remove(f)
for d in match_file_recursive(minimized_output, ".dSYM"):
shutil.rmtree(d, ignore_errors=True)
os.remove(path.join(lib_folder, 'mono', '4.5', 'mcs.exe.dylib'))
assemblies_4_5 = path.join(lib_folder, 'mono', '4.5')
assemblies_gac = path.join(lib_folder, 'mono', 'gac')
assemblies_to_exclude = [
"Microsoft.Build*",
"RabbitMQ*",
"monodoc*",
"nunit*",
"PEAPI*",
# "*FSharp*",
"Mono.Cecil.VB*",
"Microsoft.VisualBasic*",
"Mono.WebServer2*",
"*gtk-sharp*",
"*gdk-sharp*",
"*glib-sharp*",
"*pango-sharp*",
"*atk-sharp*",
"*glade-sharp*",
"*gtk-dotnet*",
"vbnc.rsp",
"*Microsoft.CodeAnalysis*",
"*.pdb"]
for list_of_files in [glob.glob(path.join(assemblies_4_5, pattern)) for pattern in assemblies_to_exclude]:
for f in list_of_files:
if path.exists(f): os.remove(f)
for list_of_dirs in [glob.glob(path.join(assemblies_gac, pattern)) for pattern in assemblies_to_exclude]:
for d in list_of_dirs:
shutil.rmtree(d, ignore_errors=False)
print("Copying library dependencies")
source_base_paths = [install_path_prefix, gdipluslib_location]
copy_lib_and_dependencies(path.join(install_path_prefix, 'lib', 'libmonosgen-2.0.dylib'), lib_folder, args.libs_install_id, source_base_paths)
copy_lib_and_dependencies(path.join(install_path_prefix, 'lib', 'libMonoPosixHelper.dylib'), lib_folder, args.libs_install_id, source_base_paths)
copy_lib_and_dependencies(path.join(gdipluslib_location, "libgdiplus.dylib"), lib_folder, args.libs_install_id, source_base_paths)
print("Finding remaining absolute paths to mono install")
fixup_all_dylib_references(lib_folder, args.libs_install_id, source_base_paths)
print("Done with minimization")
def clean():
print("# Cleaning up")
if path.exists(minimized_output):
shutil.rmtree(minimized_output)
if args.clean:
clean()
minimize()
if not args.no_pack:
print("# Packing everything with stuff")
subprocess.check_call([stuff_path, "pack", "--name", "Mono", "--condition", "OSX", "--out-dir", working_dir, minimized_output])
size_in_mb = os.stat(path.join(working_dir, 'Mono.zip')).st_size / 1024 / 1024
print("Final size: " + str(size_in_mb) + " MiB")
clean()
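# Hedged invocation sketch (module path taken from Tools/MonoGenerator/__main__.py; flags from the parser above):
#   python Tools/MonoGenerator --mono-version 4.8.1
#   python Tools/MonoGenerator --custom-mono /path/to/mono --no-pack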
|
empire.py
|
chenxiangfang/Empire
| 2,541 |
140401
|
#! /usr/bin/env python3
import empire.arguments as arguments
import sys
if __name__ == '__main__':
args = arguments.args
if args.subparser_name == 'server':
import empire.server.server as server
server.run(args)
elif args.subparser_name == 'client':
import empire.client.client as client
client.start()
sys.exit(0)
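# Usage sketch (subcommand names taken from the dispatch above; extra flags omitted):
#   ./empire.py server
#   ./empire.py client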
|
venv/Lib/site-packages/IPython/nbformat.py
|
ajayiagbebaku/NFL-Model
| 6,989 |
140406
|
"""
Shim to maintain backwards compatibility with old IPython.nbformat imports.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from IPython.utils.shimmodule import ShimModule, ShimWarning
warn("The `IPython.nbformat` package has been deprecated since IPython 4.0. "
"You should import from nbformat instead.", ShimWarning)
# Unconditionally insert the shim into sys.modules so that further import calls
# trigger the custom attribute access above
sys.modules['IPython.nbformat'] = ShimModule(
src='IPython.nbformat', mirror='nbformat')
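# Illustrative effect of the shim (a sketch, not part of the original module):
#   import IPython.nbformat          # emits a ShimWarning
#   nb = IPython.nbformat.read(...)  # attribute lookups are forwarded to the `nbformat` package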
|
test/unit_test/controller/test_callback.py
|
ArnovanHilten/NVFlare
| 155 |
140412
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import pytest
from nvflare.apis.controller_spec import ClientTask, Task, TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from .controller_test import TestController, create_client, create_task, get_ready, launch_task
def _get_task_done_callback_test_cases():
task_name = "__test_task"
def task_done_cb(task: Task, fl_ctx: FLContext):
client_names = [x.client.name for x in task.client_tasks]
expected_str = "_".join(client_names)
task.props[task_name] = expected_str
input_data = Shareable()
test_cases = [
[
"broadcast",
[create_client(f"__test_client{i}") for i in range(10)],
task_name,
input_data,
task_done_cb,
"_".join([f"__test_client{i}" for i in range(10)]),
],
[
"broadcast_and_wait",
[create_client(f"__test_client{i}") for i in range(10)],
task_name,
input_data,
task_done_cb,
"_".join([f"__test_client{i}" for i in range(10)]),
],
["send", [create_client("__test_client")], task_name, input_data, task_done_cb, "__test_client"],
["send_and_wait", [create_client("__test_client")], task_name, input_data, task_done_cb, "__test_client"],
["relay", [create_client("__test_client")], task_name, input_data, task_done_cb, "__test_client"],
["relay_and_wait", [create_client("__test_client")], task_name, input_data, task_done_cb, "__test_client"],
]
return test_cases
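# Each case above is (method, clients, task_name, input_data, task_done_cb, expected);
# the callback joins the participating client names with "_" into task.props[task_name],
# and test_task_done_cb below asserts that value equals `expected`.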
class TestCallback(TestController):
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_before_task_sent_cb(self, method):
def before_task_sent_cb(client_task: ClientTask, fl_ctx: FLContext):
client_task.task.data["_test_data"] = client_task.client.name
client_name = "_test_client"
controller, fl_ctx = self.start_controller()
client = create_client(name=client_name)
task = create_task("__test_task", before_task_sent_cb=before_task_sent_cb)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
task_name_out, _, data = controller.process_task_request(client, fl_ctx)
expected = Shareable()
expected["_test_data"] = client_name
assert data == expected
controller.cancel_task(task)
assert task.completion_status == TaskCompletionStatus.CANCELLED
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_result_received_cb(self, method):
def result_received_cb(client_task: ClientTask, fl_ctx: FLContext):
client_task.result["_test_data"] = client_task.client.name
client_name = "_test_client"
input_data = Shareable()
input_data["_test_data"] = "_old_data"
controller, fl_ctx = self.start_controller()
client = create_client(name=client_name)
task = create_task("__test_task", data=input_data, result_received_cb=result_received_cb)
kwargs = {"targets": [client]}
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": kwargs,
},
)
get_ready(launch_thread)
task_name_out, client_task_id, data = controller.process_task_request(client, fl_ctx)
controller.process_submission(
client=client, task_name="__test_task", task_id=client_task_id, fl_ctx=fl_ctx, result=data
)
expected = Shareable()
expected["_test_data"] = client_name
assert task.last_client_task_map[client_name].result == expected
controller._check_tasks()
assert task.completion_status == TaskCompletionStatus.OK
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("task_complete", ["normal", "timeout", "cancel"])
@pytest.mark.parametrize("method,clients,task_name,input_data,cb,expected", _get_task_done_callback_test_cases())
def test_task_done_cb(self, method, clients, task_name, input_data, cb, expected, task_complete):
controller, fl_ctx = self.start_controller()
timeout = 0 if task_complete != "timeout" else 1
task = create_task("__test_task", data=input_data, task_done_cb=cb, timeout=timeout)
kwargs = {"targets": clients}
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": kwargs,
},
)
get_ready(launch_thread)
client_task_ids = len(clients) * [None]
for i, client in enumerate(clients):
task_name_out, client_task_ids[i], _ = controller.process_task_request(client, fl_ctx)
if task_name_out == "":
client_task_ids[i] = None
# in here we make up client results:
result = Shareable()
result["result"] = "result"
for client, client_task_id in zip(clients, client_task_ids):
if client_task_id is not None:
if task_complete == "normal":
controller.process_submission(
client=client, task_name="__test_task", task_id=client_task_id, fl_ctx=fl_ctx, result=result
)
if task_complete == "timeout":
time.sleep(timeout)
assert task.completion_status == TaskCompletionStatus.TIMEOUT
elif task_complete == "cancel":
controller.cancel_task(task)
assert task.completion_status == TaskCompletionStatus.CANCELLED
controller._check_tasks()
assert task.props[task_name] == expected
assert controller.get_num_standing_tasks() == 0
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_cancel_task_before_send_cb(self, method):
def before_task_sent_cb(client_task: ClientTask, fl_ctx: FLContext):
client_task.task.completion_status = TaskCompletionStatus.CANCELLED
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task("__test_task", before_task_sent_cb=before_task_sent_cb)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
task_name_out, client_task_id, data = controller.process_task_request(client, fl_ctx)
assert task_name_out == ""
assert client_task_id == ""
launch_thread.join()
assert task.completion_status == TaskCompletionStatus.CANCELLED
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_cancel_task_result_received_cb(self, method):
def result_received_cb(client_task: ClientTask, fl_ctx: FLContext):
client_task.task.completion_status = TaskCompletionStatus.CANCELLED
controller, fl_ctx = self.start_controller()
client1 = create_client(name="__test_client")
client2 = create_client(name="__another_client")
task = create_task("__test_task", result_received_cb=result_received_cb)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client1, client2]},
},
)
get_ready(launch_thread)
task_name_out, client_task_id, data = controller.process_task_request(client1, fl_ctx)
result = Shareable()
result["__result"] = "__test_result"
controller.process_submission(
client=client1, task_name="__test_task", task_id=client_task_id, fl_ctx=fl_ctx, result=result
)
assert task.last_client_task_map["__test_client"].result == result
task_name_out, client_task_id, data = controller.process_task_request(client2, fl_ctx)
assert task_name_out == ""
assert client_task_id == ""
launch_thread.join()
assert task.completion_status == TaskCompletionStatus.CANCELLED
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
@pytest.mark.parametrize("method2", ["broadcast", "send", "relay"])
def test_schedule_task_before_send_cb(self, method, method2):
def before_task_sent_cb(client_task: ClientTask, fl_ctx: FLContext):
controller = fl_ctx.get_prop(key="controller")
new_task = create_task("__new_test_task")
inner_launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": new_task,
"method": method2,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client_task.client]},
},
)
inner_launch_thread.start()
inner_launch_thread.join()
controller, fl_ctx = self.start_controller()
fl_ctx.set_prop("controller", controller)
client = create_client(name="__test_client")
task = create_task("__test_task", before_task_sent_cb=before_task_sent_cb)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
launch_thread.start()
task_name_out = ""
while task_name_out == "":
task_name_out, _, _ = controller.process_task_request(client, fl_ctx)
time.sleep(0.1)
assert task_name_out == "__test_task"
new_task_name_out = ""
while new_task_name_out == "":
new_task_name_out, _, _ = controller.process_task_request(client, fl_ctx)
time.sleep(0.1)
assert new_task_name_out == "__new_test_task"
controller.cancel_task(task)
assert task.completion_status == TaskCompletionStatus.CANCELLED
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
@pytest.mark.parametrize("method2", ["broadcast", "send", "relay"])
def test_schedule_task_result_received_cb(self, method, method2):
def result_received_cb(client_task: ClientTask, fl_ctx: FLContext):
controller = fl_ctx.get_prop(key="controller")
new_task = create_task("__new_test_task")
inner_launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": new_task,
"method": method2,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client_task.client]},
},
)
get_ready(inner_launch_thread)
inner_launch_thread.join()
controller, fl_ctx = self.start_controller()
fl_ctx.set_prop("controller", controller)
client = create_client(name="__test_client")
task = create_task("__test_task", result_received_cb=result_received_cb)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
launch_thread.start()
task_name_out = ""
client_task_id = ""
data = None
while task_name_out == "":
task_name_out, client_task_id, data = controller.process_task_request(client, fl_ctx)
time.sleep(0.1)
assert task_name_out == "__test_task"
controller.process_submission(
client=client, task_name="__test_task", task_id=client_task_id, fl_ctx=fl_ctx, result=data
)
controller._check_tasks()
assert controller.get_num_standing_tasks() == 1
new_task_name_out = ""
while new_task_name_out == "":
new_task_name_out, _, _ = controller.process_task_request(client, fl_ctx)
time.sleep(0.1)
assert new_task_name_out == "__new_test_task"
launch_thread.join()
self.stop_controller(controller, fl_ctx)
|
cls/TimeText.py
|
hermanb001/Free-Node-Merge-1
| 192 |
140422
|
<gh_stars>100-1000
#!/usr/bin/env python3
import requests
import os
import time
class TimeText():  # Converts YAML, Base64, etc. content from subscription links into URL link content
    # Read the local file's last-modified time
    @staticmethod
    def get_local_file_modifiedtime(fname):
        try:
            modifiedTime = time.localtime(os.stat(fname).st_mtime)
            #mTime = time.strftime('%Y-%m-%d %H:%M:%S', modifiedTime)
            return time.asctime(modifiedTime)
        except Exception as ex:
            print("Line-35-local_file: get local file modified time error. \n" + str(ex))
            return 0
    # Read the local file's creation time
    @staticmethod
    def get_local_file_created_time(fname):
        try:
            createdTime = time.localtime(os.stat(fname).st_ctime)
            #cTime = time.strftime('%Y-%m-%d %H:%M:%S', createdTime)
            return createdTime
        except Exception as ex:
            print("Line-45-local_file: get local file created time error. \n" + str(ex))
            return 0
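# Minimal usage sketch (the file name is a placeholder):
#   print(TimeText.get_local_file_modifiedtime("merged_nodes.txt"))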
|
3rd_party/py/defusedxml_local/defusedxml/ElementTree.py
|
Wlgen/force-riscv
| 111 |
140454
|
<gh_stars>100-1000
# defusedxml
#
# Copyright (c) 2013 by <NAME> <<EMAIL>>
# Licensed to PSF under a Contributor Agreement.
# See https://www.python.org/psf/license for licensing details.
"""Defused xml.etree.ElementTree facade
"""
from __future__ import print_function, absolute_import
import sys
import warnings
from xml.etree.ElementTree import TreeBuilder as _TreeBuilder
from xml.etree.ElementTree import parse as _parse
from xml.etree.ElementTree import tostring
from .common import PY3
if PY3:
import importlib
else:
from xml.etree.ElementTree import XMLParser as _XMLParser
from xml.etree.ElementTree import iterparse as _iterparse
from xml.etree.ElementTree import ParseError
from .common import (
DTDForbidden,
EntitiesForbidden,
ExternalReferenceForbidden,
_generate_etree_functions,
)
__origin__ = "xml.etree.ElementTree"
def _get_py3_cls():
"""Python 3.3 hides the pure Python code but defusedxml requires it.
The code is based on test.support.import_fresh_module().
"""
pymodname = "xml.etree.ElementTree"
cmodname = "_elementtree"
pymod = sys.modules.pop(pymodname, None)
cmod = sys.modules.pop(cmodname, None)
sys.modules[cmodname] = None
pure_pymod = importlib.import_module(pymodname)
if cmod is not None:
sys.modules[cmodname] = cmod
else:
sys.modules.pop(cmodname)
sys.modules[pymodname] = pymod
_XMLParser = pure_pymod.XMLParser
_iterparse = pure_pymod.iterparse
ParseError = pure_pymod.ParseError
return _XMLParser, _iterparse, ParseError
if PY3:
_XMLParser, _iterparse, ParseError = _get_py3_cls()
_sentinel = object()
class DefusedXMLParser(_XMLParser):
def __init__(
self,
html=_sentinel,
target=None,
encoding=None,
forbid_dtd=False,
forbid_entities=True,
forbid_external=True,
):
# Python 2.x old style class
_XMLParser.__init__(self, target=target, encoding=encoding)
if html is not _sentinel:
# the 'html' argument has been deprecated and ignored in all
# supported versions of Python. Python 3.8 finally removed it.
if html:
raise TypeError("'html=True' is no longer supported.")
else:
warnings.warn(
"'html' keyword argument is no longer supported. Pass "
"in arguments as keyword arguments.",
category=DeprecationWarning,
)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
self.forbid_external = forbid_external
if PY3:
parser = self.parser
else:
parser = self._parser
if self.forbid_dtd:
parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
if self.forbid_entities:
parser.EntityDeclHandler = self.defused_entity_decl
parser.UnparsedEntityDeclHandler = (
self.defused_unparsed_entity_decl
)
if self.forbid_external:
parser.ExternalEntityRefHandler = (
self.defused_external_entity_ref_handler
)
def defused_start_doctype_decl(
self, name, sysid, pubid, has_internal_subset
):
raise DTDForbidden(name, sysid, pubid)
def defused_entity_decl(
self,
name,
is_parameter_entity,
value,
base,
sysid,
pubid,
notation_name,
):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def defused_unparsed_entity_decl(
self, name, base, sysid, pubid, notation_name
):
# expat 1.2
raise EntitiesForbidden(
name, None, base, sysid, pubid, notation_name
) # pragma: no cover
def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
# aliases
# XMLParse is a typo, keep it for backwards compatibility
XMLTreeBuilder = XMLParse = XMLParser = DefusedXMLParser
parse, iterparse, fromstring = _generate_etree_functions(
DefusedXMLParser, _TreeBuilder, _parse, _iterparse
)
XML = fromstring
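# Usage sketch (behaviour follows from the defused handlers above; the XML snippets are illustrative):
#   from defusedxml.ElementTree import fromstring, EntitiesForbidden
#   fromstring("<root>ok</root>")                             # parses normally
#   fromstring("<!DOCTYPE r [<!ENTITY e 'x'>]><r>&e;</r>")    # raises EntitiesForbidden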
__all__ = [
"ParseError",
"XML",
"XMLParse",
"XMLParser",
"XMLTreeBuilder",
"fromstring",
"iterparse",
"parse",
"tostring",
]
|
pypeit/scripts/chk_edges.py
|
ykwang1/PypeIt
| 107 |
140480
|
<reponame>ykwang1/PypeIt<filename>pypeit/scripts/chk_edges.py
"""
This script displays the Trace image and the traces in an RC Ginga window.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
from pypeit.scripts import scriptbase
class ChkEdges(scriptbase.ScriptBase):
@classmethod
def get_parser(cls, width=None):
parser = super().get_parser(description='Display MasterEdges image and trace data',
width=width)
parser.add_argument('trace_file', type=str, default = None,
help='PypeIt Master Trace file [e.g. MasterEdges_A_01_aa.fits.gz]')
parser.add_argument('--chname', default='MTrace', type=str,
help='Channel name for image in Ginga')
parser.add_argument('--mpl', default=False, action='store_true',
help='Use a matplotlib window instead of ginga to show the trace')
parser.add_argument('--try_old', default=False, action='store_true',
help='Attempt to load old datamodel versions. A crash may ensue..')
return parser
    # TODO: JFH I don't see why we are showing the edges and/or what all this
    # synced/not-synced handling is for. edgetrace seems to crash if the
    # syncing fails. So if we have successfully run EdgeTrace, we create a
    # slittrace object and the slittrace object is the thing we should be
    # showing, not the edgetrace object. This has the advantage that orders
    # are then correctly labeled for Echelle, which is not the case with the
    # current show method.
@staticmethod
def main(pargs):
from pypeit import edgetrace
# Load
edges = edgetrace.EdgeTraceSet.from_file(pargs.trace_file, chk_version=(not pargs.try_old))
if pargs.mpl:
edges.show(thin=10, include_img=True, idlabel=True)
else:
edges.show(thin=10, in_ginga=True)
return 0
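# Hedged CLI sketch (the console-script name is an assumption; the arguments come from get_parser above):
#   pypeit_chk_edges MasterEdges_A_01_aa.fits.gz --mpl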
|
pyformance/reporters/console_reporter.py
|
boarik/pyformance
| 167 |
140486
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import datetime
from .reporter import Reporter
class ConsoleReporter(Reporter):
"""
Show metrics in a human readable form.
This is useful for debugging if you want to read the current state on the console.
"""
def __init__(
self, registry=None, reporting_interval=30, stream=sys.stderr, clock=None
):
super(ConsoleReporter, self).__init__(registry, reporting_interval, clock)
self.stream = stream
def report_now(self, registry=None, timestamp=None):
metrics = self._collect_metrics(registry or self.registry, timestamp)
for line in metrics:
print(line, file=self.stream)
def _collect_metrics(self, registry, timestamp=None):
timestamp = timestamp or int(round(self.clock.time()))
dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
metrics = registry.dump_metrics()
metrics_data = [
"== %s ==================================="
% dt.strftime("%Y-%m-%d %H:%M:%S")
]
for key in metrics.keys():
values = metrics[key]
metrics_data.append("%s:" % key)
for value_key in values.keys():
metrics_data.append("%20s = %s" % (value_key, values[value_key]))
metrics_data.append("")
return metrics_data
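# Usage sketch (assumes pyformance's MetricsRegistry; names outside this module are assumptions):
#   from pyformance import MetricsRegistry
#   registry = MetricsRegistry()
#   registry.counter("requests").inc()
#   ConsoleReporter(registry=registry, reporting_interval=10).report_now()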
|
modules/t_google_play_download/gplay-cli/ext_libs/androguard/core/bytecodes/jvm.py
|
napoler/t-drupal-module
| 285 |
140519
|
# This file is part of Androguard.
#
# Copyright (C) 2012, <NAME> <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from struct import pack, unpack, calcsize
from collections import namedtuple
import re, zipfile, StringIO, os
from androguard.core import bytecode
from androguard.core.bytecode import SV, SVs
######################################################## JAR FORMAT ########################################################
class JAR :
def __init__(self, filename, raw=False) :
self.filename = filename
if raw == True :
self.__raw = filename
else :
fd = open( filename, "rb" )
self.__raw = fd.read()
fd.close()
self.zip = zipfile.ZipFile( StringIO.StringIO( self.__raw ) )
def get_classes(self) :
l = []
for i in self.zip.namelist() :
if ".class" in i :
l.append( (i, self.zip.read(i)) )
return l
def show(self) :
print self.zip.namelist()
######################################################## CLASS FORMAT ########################################################
# Special functions to more easily manage the special arguments of bytecodes
def special_F0(x) :
return [ i for i in x ]
def special_F0R(x) :
return [ x ]
def special_F1(x) :
return (x[0] << 8) | x[1]
def special_F1R(x) :
return [ (x & 0xFF00) >> 8, x & 0x00FF ]
def special_F2(x) :
v = ((x[0] << 8) | x[1])
if v > 0x7FFF :
v = (0x7FFF & v) - 0x8000
return v
def special_F2R(x) :
val = x & 0xFFFF
return [ (val & 0xFF00) >> 8, val & 0x00FF ]
def special_F3(x) :
val = (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3]
if val > 0x7fffffff :
val = (0x7fffffff & val) - 0x80000000
return val
def special_F3R(x) :
val = x & 0xFFFFFFFF
return [ (val & 0xFF000000) >> 24, (val & 0x00FF0000) >> 16, (val & 0x0000FF00) >> 8, val & 0x000000FF ]
def special_F4(x) :
return [ (x[0] << 8) | x[1], x[2] ]
def special_F4R(x) :
pass
def specialSwitch(x) :
return x
FD = { "B" : "byte",
"C" : "char",
"D" : "double",
"F" : "float",
"I" : "int",
"J" : "long",
"S" : "short",
"Z" : "boolean",
"V" : "void",
}
def formatFD(v) :
#print v, "--->",
l = []
i = 0
while i < len(v) :
if v[i] == "L" :
base_object = ""
i = i + 1
while v[i] != ";" :
base_object += v[i]
i = i + 1
l.append( os.path.basename( base_object ) )
elif v[i] == "[" :
z = []
while v[i] == "[" :
z.append( "[]" )
i = i + 1
l.append( [ FD[ v[i] ], z ] )
else :
l.append( FD[ v[i] ] )
i = i + 1
#print l
return l
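# Worked examples (derived from the loop above):
#   formatFD("I")                  -> ["int"]
#   formatFD("[I")                 -> [["int", ["[]"]]]
#   formatFD("Ljava/lang/String;") -> ["String"]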
def TableSwitch(idx, raw_format) :
r_buff = []
r_format = ">"
idx = idx + 1
n = 0
if idx % 4 :
n = 4 - (idx % 4)
for i in range(0, n) :
r_buff.append( "bytepad%d" % i )
r_format += "B"
r_buff.extend( [ "default", "low", "high" ] )
r_format += "LLL"
idx = 1 + n + 4
low = unpack('>L', raw_format[ idx : idx + 4 ])[0]
idx = idx + 4
high = unpack('>L', raw_format[ idx : idx + 4 ])[0]
for i in range(0, high - low + 1) :
r_buff.append( "offset%d" % i )
r_format += "L"
return specialSwitch, specialSwitch, r_buff, r_format, None
def LookupSwitch(idx, raw_format) :
r_buff = []
r_format = ">"
idx = idx + 1
n = 0
if idx % 4 :
n = 4 - (idx % 4)
for i in range(0, n) :
r_buff.append( "bytepad%d" % i )
r_format += "B"
r_buff.extend( [ "default", "npairs" ] )
r_format += "LL"
idx = 1 + n + 4
for i in range(0, unpack('>L', raw_format[ idx : idx + 4 ])[0]) :
r_buff.extend( [ "match%d" % i, "offset%d" % i ] )
r_format += "LL"
return specialSwitch, specialSwitch, r_buff, r_format, None
# The list of java bytecodes, with their value, name, and special functions !
JAVA_OPCODES = {
0x32 : [ "aaload" ],
0x53 : [ "aastore" ],
0x1 : [ "aconst_null" ],
0x19 : [ "aload", "index:B", special_F0, special_F0, None ],
0x2a : [ "aload_0" ],
0x2b : [ "aload_1" ],
0x2c : [ "aload_2" ],
0x2d : [ "aload_3" ],
0xbd : [ "anewarray", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_class" ],
0xb0 : [ "areturn" ],
0xbe : [ "arraylength" ],
0x3a : [ "astore", "index:B", special_F0, special_F0, None ],
0x4b : [ "astore_0" ],
0x4c : [ "astore_1" ],
0x4d : [ "astore_2" ],
0x4e : [ "astore_3" ],
0xbf : [ "athrow" ],
0x33 : [ "baload" ],
0x54 : [ "bastore" ],
0x10 : [ "bipush", "byte:B", special_F0, special_F0R, None ],
0x34 : [ "caload" ],
0x55 : [ "castore" ],
0xc0 : [ "checkcast", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, None ],
0x90 : [ "d2f" ],
0x8e : [ "d2i" ],
0x8f : [ "d2l" ],
0x63 : [ "dadd" ],
0x31 : [ "daload" ],
0x52 : [ "dastore" ],
0x98 : [ "dcmpg" ],
0x97 : [ "dcmpl" ],
0xe : [ "dconst_0" ],
0xf : [ "dconst_1" ],
0x6f : [ "ddiv" ],
0x18 : [ "dload", "index:B", special_F0, special_F0, None ],
0x26 : [ "dload_0" ],
0x27 : [ "dload_1" ],
0x28 : [ "dload_2" ],
0x29 : [ "dload_3" ],
0x6b : [ "dmul" ],
0x77 : [ "dneg" ],
0x73 : [ "drem" ],
0xaf : [ "dreturn" ],
0x39 : [ "dstore", "index:B", special_F0, special_F0, None ],
0x47 : [ "dstore_0" ],
0x48 : [ "dstore_1" ],
0x49 : [ "dstore_2" ],
0x4a : [ "dstore_3" ],
0x67 : [ "dsub" ],
0x59 : [ "dup" ],
0x5a : [ "dup_x1" ],
0x5b : [ "dup_x2" ],
0x5c : [ "dup2" ],
0x5d : [ "dup2_x1" ],
0x5e : [ "dup2_x2" ],
0x8d : [ "f2d" ],
0x8b : [ "f2i" ],
0x8c : [ "f2l" ],
0x62 : [ "fadd" ],
0x30 : [ "faload" ],
0x51 : [ "fastore" ],
0x96 : [ "fcmpg" ],
0x95 : [ "fcmpl" ],
0xb : [ "fconst_0" ],
0xc : [ "fconst_1" ],
0xd : [ "fconst_2" ],
0x6e : [ "fdiv" ],
0x17 : [ "fload", "index:B", special_F0, special_F0, None ],
0x22 : [ "fload_0" ],
0x23 : [ "fload_1" ],
0x24 : [ "fload_2" ],
0x25 : [ "fload_3" ],
0x6a : [ "fmul" ],
0x76 : [ "fneg" ],
0x72 : [ "frem" ],
0xae : [ "freturn" ],
0x38 : [ "fstore", "index:B", special_F0, special_F0, None ],
0x43 : [ "fstore_0" ],
0x44 : [ "fstore_1" ],
0x45 : [ "fstore_2" ],
0x46 : [ "fstore_3" ],
0x66 : [ "fsub" ],
0xb4 : [ "getfield", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field" ],
0xb2 : [ "getstatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
0xa7 : [ "goto", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc8 : [ "goto_w", "branchbyte1:B branchbyte2:B branchbyte3:B branchbyte4:B", special_F3, special_F3R, None ],
0x91 : [ "i2b" ],
0x92 : [ "i2c" ],
0x87 : [ "i2d" ],
0x86 : [ "i2f" ],
0x85 : [ "i2l" ],
0x93 : [ "i2s" ],
0x60 : [ "iadd" ],
0x2e : [ "iaload" ],
0x7e : [ "iand" ],
0x4f : [ "iastore" ],
0x2 : [ "iconst_m1" ],
0x3 : [ "iconst_0" ],
0x4 : [ "iconst_1" ],
0x5 : [ "iconst_2" ],
0x6 : [ "iconst_3" ],
0x7 : [ "iconst_4" ],
0x8 : [ "iconst_5" ],
0x6c : [ "idiv" ],
0xa5 : [ "if_acmpeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa6 : [ "if_acmpne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9f : [ "if_icmpeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa0 : [ "if_icmpne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa1 : [ "if_icmplt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa2 : [ "if_icmpge", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa3 : [ "if_icmpgt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xa4 : [ "if_icmple", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x99 : [ "ifeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9a : [ "ifne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9b : [ "iflt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9c : [ "ifge", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9d : [ "ifgt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x9e : [ "ifle", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc7 : [ "ifnonnull", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc6 : [ "ifnull", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0x84 : [ "iinc", "index:B const:B", special_F0, special_F0, None ],
0x15 : [ "iload", "index:B", special_F0, special_F0, None ],
0x1a : [ "iload_0" ],
0x1b : [ "iload_1" ],
0x1c : [ "iload_2" ],
0x1d : [ "iload_3" ],
0x68 : [ "imul" ],
0x74 : [ "ineg" ],
0xc1 : [ "instanceof", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
0xb9 : [ "invokeinterface", "indexbyte1:B indexbyte2:B count:B null:B", special_F1, special_F1R, "get_interface", "get_interface_index" ],
0xb7 : [ "invokespecial", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
0xb8 : [ "invokestatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
0xb6 : [ "invokevirtual", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
0x80 : [ "ior" ],
0x70 : [ "irem" ],
0xac : [ "ireturn" ],
0x78 : [ "ishl" ],
0x7a : [ "ishr" ],
0x36 : [ "istore", "index:B", special_F0, special_F0, None ],
0x3b : [ "istore_0" ],
0x3c : [ "istore_1" ],
0x3d : [ "istore_2" ],
0x3e : [ "istore_3" ],
0x64 : [ "isub" ],
0x7c : [ "iushr" ],
0x82 : [ "ixor" ],
0xa8 : [ "jsr", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
0xc9 : [ "jsr_w", "branchbyte1:B branchbyte2:B branchbyte3:B branchbyte4:B", special_F3, special_F3R, None ],
0x8a : [ "l2d" ],
0x89 : [ "l2f" ],
0x88 : [ "l2i" ],
0x61 : [ "ladd" ],
0x2f : [ "laload" ],
0x7f : [ "land" ],
0x50 : [ "lastore" ],
0x94 : [ "lcmp" ],
0x9 : [ "lconst_0" ],
0xa : [ "lconst_1" ],
0x12 : [ "ldc", "index:B", special_F0, special_F0R, "get_value" ],
0x13 : [ "ldc_w", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
0x14 : [ "ldc2_w", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
0x6d : [ "ldiv" ],
0x16 : [ "lload", "index:B", special_F0, special_F0, None ],
0x1e : [ "lload_0" ],
0x1f : [ "lload_1" ],
0x20 : [ "lload_2" ],
0x21 : [ "lload_3" ],
0x69 : [ "lmul" ],
0x75 : [ "lneg" ],
0xab : [ "lookupswitch", LookupSwitch ],
0x81 : [ "lor" ],
0x71 : [ "lrem" ],
0xad : [ "lreturn" ],
0x79 : [ "lshl" ],
0x7b : [ "lshr" ],
0x37 : [ "lstore", "index:B", special_F0, special_F0, None ],
0x3f : [ "lstore_0" ],
0x40 : [ "lstore_1" ],
0x41 : [ "lstore_2" ],
0x42 : [ "lstore_3" ],
0x65 : [ "lsub" ],
0x7d : [ "lushr" ],
0x83 : [ "lxor" ],
0xc2 : [ "monitorenter" ],
0xc3 : [ "monitorexit" ],
0xc5 : [ "multianewarray", "indexbyte1:B indexbyte2:B dimensions:B", special_F4, special_F4R, None ],
0xbb : [ "new", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_class", "get_class_index2" ],
0xbc : [ "newarray", "atype:B", special_F0, special_F0, "get_array_type" ],
0x0 : [ "nop" ],
0x57 : [ "pop" ],
0x58 : [ "pop2" ],
0xb5 : [ "putfield", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
0xb3 : [ "putstatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
0xa9 : [ "ret", "index:B", special_F0, special_F0, None ],
0xb1 : [ "return" ],
0x35 : [ "saload" ],
0x56 : [ "sastore" ],
0x11 : [ "sipush", "byte1:B byte2:B", special_F1, special_F1R, None ],
0x5f : [ "swap" ],
0xaa : [ "tableswitch", TableSwitch ],
0xc4 : [ "wide" ], # FIXME
}
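# Entry layouts (as consumed by EXTRACT_INFORMATION_SIMPLE / EXTRACT_INFORMATION_VARIABLE below):
#   value : [ name ]                                                  -> no operands
#   value : [ name, "field:fmt ...", unpack_fn, pack_fn, resolver ]   -> fixed operands, with an
#            optional class-manager resolver name ("get_class", "get_method", ...)
#   value : [ name, parser_fn ]                                       -> variable length (tableswitch, lookupswitch)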
# Invert the mapping: bytecode name -> value
INVERT_JAVA_OPCODES = dict([( JAVA_OPCODES[k][0], k ) for k in JAVA_OPCODES])
# List of java bytecodes which can modify the control flow
BRANCH_JVM_OPCODES = [ "goto", "goto_w", "if_acmpeq", "if_icmpeq", "if_icmpne", "if_icmplt", "if_icmpge", "if_icmpgt", "if_icmple", "ifeq", "ifne", "iflt", "ifge", "ifgt", "ifle", "ifnonnull", "ifnull", "jsr", "jsr_w" ]
BRANCH2_JVM_OPCODES = [ "goto", "goto.", "jsr", "jsr.", "if.", "return", ".return", "tableswitch", "lookupswitch" ]
MATH_JVM_OPCODES = { ".and" : '&',
".add" : '+',
".sub" : '-',
".mul" : '*',
".div" : '/',
".shl" : '<<',
".shr" : '>>',
".xor" : '^',
".or" : '|',
}
MATH_JVM_RE = []
for i in MATH_JVM_OPCODES :
MATH_JVM_RE.append( (re.compile( i ), MATH_JVM_OPCODES[i]) )
INVOKE_JVM_OPCODES = [ "invoke." ]
FIELD_READ_JVM_OPCODES = [ "get." ]
FIELD_WRITE_JVM_OPCODES = [ "put." ]
BREAK_JVM_OPCODES = [ "invoke.", "put.", ".store", "iinc", "pop", ".return", "if." ]
INTEGER_INSTRUCTIONS = [ "bipush", "sipush" ]
def EXTRACT_INFORMATION_SIMPLE(op_value) :
"""Extract information (special functions) about a bytecode"""
r_function = JAVA_OPCODES[ op_value ][2]
v_function = JAVA_OPCODES[ op_value ][3]
f_function = JAVA_OPCODES[ op_value ][4]
r_format = ">"
r_buff = []
format = JAVA_OPCODES[ op_value ][1]
l = format.split(" ")
for j in l :
operands = j.split(":")
name = operands[0] + " "
val = operands[1]
r_buff.append( name.replace(' ', '') )
r_format += val
return ( r_function, v_function, r_buff, r_format, f_function )
def EXTRACT_INFORMATION_VARIABLE(idx, op_value, raw_format) :
r_function, v_function, r_buff, r_format, f_function = JAVA_OPCODES[ op_value ][1]( idx, raw_format )
return ( r_function, v_function, r_buff, r_format, f_function )
def determineNext(i, end, m) :
#if "invoke" in i.get_name() :
# self.childs.append( self.end, -1, ExternalMethod( i.get_operands()[0], i.get_operands()[1], i.get_operands()[2] ) )
# self.childs.append( self.end, self.end, self.__context.get_basic_block( self.end + 1 ) )
if "return" in i.get_name() :
return [ -1 ]
elif "goto" in i.get_name() :
return [ i.get_operands() + end ]
elif "jsr" in i.get_name() :
return [ i.get_operands() + end ]
elif "if" in i.get_name() :
return [ end + i.get_length(), i.get_operands() + end ]
elif "tableswitch" in i.get_name() :
x = []
x.append( i.get_operands().default + end )
for idx in range(0, (i.get_operands().high - i.get_operands().low) + 1) :
off = getattr(i.get_operands(), "offset%d" % idx)
x.append( off + end )
return x
elif "lookupswitch" in i.get_name() :
x = []
x.append( i.get_operands().default + end )
for idx in range(0, i.get_operands().npairs) :
off = getattr(i.get_operands(), "offset%d" % idx)
x.append( off + end )
return x
return []
def determineException(vm, m) :
return []
def classToJclass(x) :
return "L%s;" % x
METHOD_INFO = [ '>HHHH', namedtuple("MethodInfo", "access_flags name_index descriptor_index attributes_count") ]
ATTRIBUTE_INFO = [ '>HL', namedtuple("AttributeInfo", "attribute_name_index attribute_length") ]
FIELD_INFO = [ '>HHHH', namedtuple("FieldInfo", "access_flags name_index descriptor_index attributes_count") ]
LINE_NUMBER_TABLE = [ '>HH', namedtuple("LineNumberTable", "start_pc line_number") ]
EXCEPTION_TABLE = [ '>HHHH', namedtuple("ExceptionTable", "start_pc end_pc handler_pc catch_type") ]
LOCAL_VARIABLE_TABLE = [ '>HHHHH', namedtuple("LocalVariableTable", "start_pc length name_index descriptor_index index") ]
LOCAL_VARIABLE_TYPE_TABLE = [ '>HHHHH', namedtuple("LocalVariableTypeTable", "start_pc length name_index signature_index index") ]
CODE_LOW_STRUCT = [ '>HHL', namedtuple( "LOW", "max_stack max_locals code_length" ) ]
ARRAY_TYPE = {
4 : "T_BOOLEAN",
5 : "T_CHAR",
6 : "T_FLOAT",
7 : "T_DOUBLE",
8 : "T_BYTE",
9 : "T_SHORT",
10 : "T_INT",
11 : "T_LONG",
}
INVERT_ARRAY_TYPE = dict([( ARRAY_TYPE[k], k ) for k in ARRAY_TYPE])  # values are plain strings here, so no [0] (unlike the flag tables below)
ACC_CLASS_FLAGS = {
0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
0x0010 : [ "ACC_FINAL", "Declared final; no subclasses allowed." ],
0x0020 : [ "ACC_SUPER", "Treat superclass methods specially when invoked by the invokespecial instruction." ],
0x0200 : [ "ACC_INTERFACE", "Is an interface, not a class." ],
0x0400 : [ "ACC_ABSTRACT", "Declared abstract; may not be instantiated." ],
}
INVERT_ACC_CLASS_FLAGS = dict([( ACC_CLASS_FLAGS[k][0], k ) for k in ACC_CLASS_FLAGS])
ACC_FIELD_FLAGS = {
0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
0x0002 : [ "ACC_PRIVATE", "Declared private; usable only within the defining class." ],
0x0004 : [ "ACC_PROTECTED", "Declared protected; may be accessed within subclasses." ],
0x0008 : [ "ACC_STATIC", "Declared static." ],
0x0010 : [ "ACC_FINAL", "Declared final; no further assignment after initialization." ],
0x0040 : [ "ACC_VOLATILE", "Declared volatile; cannot be cached." ],
0x0080 : [ "ACC_TRANSIENT", "Declared transient; not written or read by a persistent object manager." ],
}
INVERT_ACC_FIELD_FLAGS = dict([( ACC_FIELD_FLAGS[k][0], k ) for k in ACC_FIELD_FLAGS])
ACC_METHOD_FLAGS = {
0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
0x0002 : [ "ACC_PRIVATE", "Declared private; accessible only within the defining class." ],
0x0004 : [ "ACC_PROTECTED", "Declared protected; may be accessed within subclasses." ],
0x0008 : [ "ACC_STATIC", "Declared static." ],
0x0010 : [ "ACC_FINAL", "Declared final; may not be overridden." ],
0x0020 : [ "ACC_SYNCHRONIZED", "Declared synchronized; invocation is wrapped in a monitor lock." ],
0x0100 : [ "ACC_NATIVE", "Declared native; implemented in a language other than Java." ],
0x0400 : [ "ACC_ABSTRACT", "Declared abstract; no implementation is provided." ],
0x0800 : [ "ACC_STRICT", "Declared strictfp; floating-point mode is FP-strict" ]
}
INVERT_ACC_METHOD_FLAGS = dict([( ACC_METHOD_FLAGS[k][0], k ) for k in ACC_METHOD_FLAGS])
class CpInfo(object) :
"""Generic class to manage constant info object"""
def __init__(self, buff) :
self.__tag = SV( '>B', buff.read_b(1) )
self.__bytes = None
self.__extra = 0
tag_value = self.__tag.get_value()
format = CONSTANT_INFO[ tag_value ][1]
self.__name = CONSTANT_INFO[ tag_value ][0]
self.format = SVs( format, CONSTANT_INFO[ tag_value ][2], buff.read( calcsize( format ) ) )
# Utf8 value ?
if tag_value == 1 :
self.__extra = self.format.get_value().length
self.__bytes = SVs( ">%ss" % self.format.get_value().length, namedtuple( CONSTANT_INFO[ tag_value ][0] + "_next", "bytes" ), buff.read( self.format.get_value().length ) )
def get_format(self) :
return self.format
def get_name(self) :
return self.__name
def get_bytes(self) :
return self.__bytes.get_value().bytes
def set_bytes(self, name) :
self.format.set_value( { "length" : len(name) } )
self.__extra = self.format.get_value().length
self.__bytes = SVs( ">%ss" % self.format.get_value().length, namedtuple( CONSTANT_INFO[ self.__tag.get_value() ][0] + "_next", "bytes" ), name )
def get_length(self) :
return self.__extra + calcsize( CONSTANT_INFO[ self.__tag.get_value() ][1] )
def get_raw(self) :
if self.__bytes != None :
return self.format.get_value_buff() + self.__bytes.get_value_buff()
return self.format.get_value_buff()
def show(self) :
if self.__bytes != None :
print self.format.get_value(), self.__bytes.get_value()
else :
print self.format.get_value()
class MethodRef(CpInfo) :
def __init__(self, class_manager, buff) :
super(MethodRef, self).__init__( buff )
def get_class_index(self) :
return self.format.get_value().class_index
def get_name_and_type_index(self) :
return self.format.get_value().name_and_type_index
class InterfaceMethodRef(CpInfo) :
def __init__(self, class_manager, buff) :
super(InterfaceMethodRef, self).__init__( buff )
def get_class_index(self) :
return self.format.get_value().class_index
def get_name_and_type_index(self) :
return self.format.get_value().name_and_type_index
class FieldRef(CpInfo) :
def __init__(self, class_manager, buff) :
super(FieldRef, self).__init__( buff )
def get_class_index(self) :
return self.format.get_value().class_index
def get_name_and_type_index(self) :
return self.format.get_value().name_and_type_index
class Class(CpInfo) :
def __init__(self, class_manager, buff) :
super(Class, self).__init__( buff )
def get_name_index(self) :
return self.format.get_value().name_index
class Utf8(CpInfo) :
def __init__(self, class_manager, buff) :
super(Utf8, self).__init__( buff )
class String(CpInfo) :
def __init__(self, class_manager, buff) :
super(String, self).__init__( buff )
class Integer(CpInfo) :
def __init__(self, class_manager, buff) :
super(Integer, self).__init__( buff )
class Float(CpInfo) :
def __init__(self, class_manager, buff) :
super(Float, self).__init__( buff )
class Long(CpInfo) :
def __init__(self, class_manager, buff) :
super(Long, self).__init__( buff )
class Double(CpInfo) :
def __init__(self, class_manager, buff) :
super(Double, self).__init__( buff )
class NameAndType(CpInfo) :
def __init__(self, class_manager, buff) :
super(NameAndType, self).__init__( buff )
    def get_get_name_index(self) :
        # kept as an alias of get_name_index; the namedtuple has no 'get_name_index' field
        return self.format.get_value().name_index
def get_name_index(self) :
return self.format.get_value().name_index
def get_descriptor_index(self) :
return self.format.get_value().descriptor_index
class EmptyConstant :
def __init__(self) :
pass
def get_name(self) :
return ""
def get_raw(self) :
return ""
def get_length(self) :
return 0
def show(self) :
pass
CONSTANT_INFO = {
7 : [ "CONSTANT_Class", '>BH', namedtuple( "CONSTANT_Class_info", "tag name_index" ), Class ],
9 : [ "CONSTANT_Fieldref", '>BHH', namedtuple( "CONSTANT_Fieldref_info", "tag class_index name_and_type_index" ), FieldRef ],
10 : [ "CONSTANT_Methodref", '>BHH', namedtuple( "CONSTANT_Methodref_info", "tag class_index name_and_type_index" ), MethodRef ],
11 : [ "CONSTANT_InterfaceMethodref", '>BHH', namedtuple( "CONSTANT_InterfaceMethodref_info", "tag class_index name_and_type_index" ), InterfaceMethodRef ],
8 : [ "CONSTANT_String", '>BH', namedtuple( "CONSTANT_String_info", "tag string_index" ), String ],
3 : [ "CONSTANT_Integer", '>BL', namedtuple( "CONSTANT_Integer_info", "tag bytes" ), Integer ],
4 : [ "CONSTANT_Float", '>BL', namedtuple( "CONSTANT_Float_info", "tag bytes" ), Float ],
5 : [ "CONSTANT_Long", '>BLL', namedtuple( "CONSTANT_Long_info", "tag high_bytes low_bytes" ), Long ],
6 : [ "CONSTANT_Double", '>BLL', namedtuple( "CONSTANT_Long_info", "tag high_bytes low_bytes" ), Double ],
12 : [ "CONSTANT_NameAndType", '>BHH', namedtuple( "CONSTANT_NameAndType_info", "tag name_index descriptor_index" ), NameAndType ],
1 : [ "CONSTANT_Utf8", '>BH', namedtuple( "CONSTANT_Utf8_info", "tag length" ), Utf8 ]
}
INVERT_CONSTANT_INFO = dict([( CONSTANT_INFO[k][0], k ) for k in CONSTANT_INFO])
ITEM_Top = 0
ITEM_Integer = 1
ITEM_Float = 2
ITEM_Long = 4
ITEM_Double = 3
ITEM_Null = 5
ITEM_UninitializedThis = 6
ITEM_Object = 7
ITEM_Uninitialized = 8
VERIFICATION_TYPE_INFO = {
ITEM_Top : [ "Top_variable_info", '>B', namedtuple( "Top_variable_info", "tag" ) ],
ITEM_Integer : [ "Integer_variable_info", '>B', namedtuple( "Integer_variable_info", "tag" ) ],
ITEM_Float : [ "Float_variable_info", '>B', namedtuple( "Float_variable_info", "tag" ) ],
ITEM_Long : [ "Long_variable_info", '>B', namedtuple( "Long_variable_info", "tag" ) ],
ITEM_Double : [ "Double_variable_info", '>B', namedtuple( "Double_variable_info", "tag" ) ],
ITEM_Null : [ "Null_variable_info", '>B', namedtuple( "Null_variable_info", "tag" ) ],
ITEM_UninitializedThis : [ "UninitializedThis_variable_info", '>B', namedtuple( "UninitializedThis_variable_info", "tag" ) ],
ITEM_Object : [ "Object_variable_info", '>BH', namedtuple( "Object_variable_info", "tag cpool_index" ), [ ("cpool_index", "get_class") ] ],
ITEM_Uninitialized : [ "Uninitialized_variable_info", '>BH', namedtuple( "Uninitialized_variable_info", "tag offset" ) ],
}
class FieldInfo :
"""An object which represents a Field"""
def __init__(self, class_manager, buff) :
self.__raw_buff = buff.read( calcsize( FIELD_INFO[0] ) )
self.format = SVs( FIELD_INFO[0], FIELD_INFO[1], self.__raw_buff )
self.__CM = class_manager
self.__attributes = []
for i in range(0, self.format.get_value().attributes_count) :
ai = AttributeInfo( self.__CM, buff )
self.__attributes.append( ai )
def get_raw(self) :
return self.__raw_buff + ''.join(x.get_raw() for x in self.__attributes)
def get_length(self) :
val = 0
for i in self.__attributes :
val += i.length
return val + calcsize( FIELD_INFO[0] )
def get_access(self) :
try :
return ACC_FIELD_FLAGS[ self.format.get_value().access_flags ][0]
except KeyError :
ok = True
access = ""
for i in ACC_FIELD_FLAGS :
if (i & self.format.get_value().access_flags) == i :
access += ACC_FIELD_FLAGS[ i ][0] + " "
ok = False
if ok == False :
return access[:-1]
return "ACC_PRIVATE"
def set_access(self, value) :
self.format.set_value( { "access_flags" : value } )
def get_class_name(self) :
return self.__CM.get_this_class_name()
def get_name(self) :
return self.__CM.get_string( self.format.get_value().name_index )
def set_name(self, name) :
return self.__CM.set_string( self.format.get_value().name_index, name )
def get_descriptor(self) :
return self.__CM.get_string( self.format.get_value().descriptor_index )
def set_descriptor(self, name) :
return self.__CM.set_string( self.format.get_value().descriptor_index, name )
def get_attributes(self) :
return self.__attributes
def get_name_index(self) :
return self.format.get_value().name_index
def get_descriptor_index(self) :
return self.format.get_value().descriptor_index
def show(self) :
print self.format.get_value(), self.get_name(), self.get_descriptor()
for i in self.__attributes :
i.show()
class MethodInfo :
"""An object which represents a Method"""
def __init__(self, class_manager, buff) :
self.format = SVs( METHOD_INFO[0], METHOD_INFO[1], buff.read( calcsize( METHOD_INFO[0] ) ) )
self.__CM = class_manager
self.__code = None
self.__attributes = []
for i in range(0, self.format.get_value().attributes_count) :
ai = AttributeInfo( self.__CM, buff )
self.__attributes.append( ai )
if ai.get_name() == "Code" :
self.__code = ai
def get_raw(self) :
return self.format.get_value_buff() + ''.join(x.get_raw() for x in self.__attributes)
def get_length(self) :
val = 0
for i in self.__attributes :
val += i.length
return val + calcsize( METHOD_INFO[0] )
def get_attributes(self) :
return self.__attributes
def get_access(self) :
return ACC_METHOD_FLAGS[ self.format.get_value().access_flags ][0]
def set_access(self, value) :
self.format.set_value( { "access_flags" : value } )
def get_name(self) :
return self.__CM.get_string( self.format.get_value().name_index )
def set_name(self, name) :
return self.__CM.set_string( self.format.get_value().name_index, name )
def get_descriptor(self) :
return self.__CM.get_string( self.format.get_value().descriptor_index )
def set_descriptor(self, name) :
        return self.__CM.set_string( self.format.get_value().descriptor_index, name )
def get_name_index(self) :
return self.format.get_value().name_index
def get_descriptor_index(self) :
return self.format.get_value().descriptor_index
def get_local_variables(self) :
return self.get_code().get_local_variables()
def get_code(self) :
if self.__code == None :
return None
return self.__code.get_item()
def set_name_index(self, name_index) :
self.format.set_value( { "name_index" : name_index } )
def set_descriptor_index(self, descriptor_index) :
self.format.set_value( { "descriptor_index" : descriptor_index } )
def get_class_name(self) :
return self.__CM.get_this_class_name()
def set_cm(self, cm) :
self.__CM = cm
for i in self.__attributes :
i.set_cm( cm )
def with_descriptor(self, descriptor) :
return descriptor == self.__CM.get_string( self.format.get_value().descriptor_index )
def _patch_bytecodes(self) :
return self.get_code()._patch_bytecodes()
def show(self) :
print "*" * 80
print self.format.get_value(), self.get_class_name(), self.get_name(), self.get_descriptor()
for i in self.__attributes :
i.show()
print "*" * 80
def pretty_show(self, vm_a) :
print "*" * 80
print self.format.get_value(), self.get_class_name(), self.get_name(), self.get_descriptor()
for i in self.__attributes :
i.pretty_show(vm_a.hmethods[ self ])
print "*" * 80
class CreateString :
    """Create a specific String constant given the name index"""
def __init__(self, class_manager, bytes) :
self.__string_index = class_manager.add_string( bytes )
def get_raw(self) :
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_String" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, self.__string_index )
return buff
class CreateInteger :
    """Create a specific Integer constant given its value"""
def __init__(self, byte) :
self.__byte = byte
def get_raw(self) :
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Integer" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, self.__byte )
return buff
class CreateClass :
    """Create a specific Class constant given the name index"""
def __init__(self, class_manager, name_index) :
self.__CM = class_manager
self.__name_index = name_index
def get_raw(self) :
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Class" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, self.__name_index )
return buff
class CreateNameAndType :
    """Create a specific NameAndType constant given the name and the descriptor index"""
def __init__(self, class_manager, name_index, descriptor_index) :
self.__CM = class_manager
self.__name_index = name_index
self.__descriptor_index = descriptor_index
def get_raw(self) :
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_NameAndType" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, self.__name_index, self.__descriptor_index )
return buff
class CreateFieldRef :
    """Create a specific FieldRef constant given the class and the NameAndType index"""
def __init__(self, class_manager, class_index, name_and_type_index) :
self.__CM = class_manager
self.__class_index = class_index
self.__name_and_type_index = name_and_type_index
def get_raw(self) :
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Fieldref" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, self.__class_index, self.__name_and_type_index )
return buff
class CreateMethodRef :
    """Create a specific MethodRef constant given the class and the NameAndType index"""
def __init__(self, class_manager, class_index, name_and_type_index) :
self.__CM = class_manager
self.__class_index = class_index
self.__name_and_type_index = name_and_type_index
def get_raw(self) :
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Methodref" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, self.__class_index, self.__name_and_type_index )
return buff
class CreateCodeAttributeInfo :
    """Create a specific CodeAttributeInfo given bytecodes (in a human-readable format)"""
def __init__(self, class_manager, codes) :
self.__CM = class_manager
#ATTRIBUTE_INFO = [ '>HL', namedtuple("AttributeInfo", "attribute_name_index attribute_length") ]
self.__attribute_name_index = self.__CM.get_string_index( "Code" )
self.__attribute_length = 0
########
# CODE_LOW_STRUCT = [ '>HHL', namedtuple( "LOW", "max_stack max_locals code_length" ) ]
self.__max_stack = 1
self.__max_locals = 2
self.__code_length = 0
########
# CODE
raw_buff = ""
for i in codes :
op_name = i[0]
op_value = INVERT_JAVA_OPCODES[ op_name ]
raw_buff += pack( '>B', op_value )
if len( JAVA_OPCODES[ op_value ] ) > 1 :
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
raw_buff += pack(r_format, *v_function( *i[1:] ) )
self.__code = JavaCode( self.__CM, raw_buff )
self.__code_length = len( raw_buff )
########
# EXCEPTION
# u2 exception_table_length;
self.__exception_table_length = 0
# { u2 start_pc;
# u2 end_pc;
# u2 handler_pc;
# u2 catch_type;
# } exception_table[exception_table_length];
self.__exception_table = []
########
# ATTRIBUTES
# u2 attributes_count;
self.__attributes_count = 0
# attribute_info attributes[attributes_count];
self.__attributes = []
########
# FIXME : remove calcsize
self.__attribute_length = calcsize( ATTRIBUTE_INFO[0] ) + \
calcsize( CODE_LOW_STRUCT[0] ) + \
self.__code_length + \
calcsize('>H') + \
calcsize('>H')
def get_raw(self) :
return pack( ATTRIBUTE_INFO[0], self.__attribute_name_index, self.__attribute_length ) + \
pack( CODE_LOW_STRUCT[0], self.__max_stack, self.__max_locals, self.__code_length ) + \
self.__code.get_raw() + \
pack( '>H', self.__exception_table_length ) + \
''.join( i.get_raw() for i in self.__exception_table ) + \
pack( '>H', self.__attributes_count ) + \
''.join( i.get_raw() for i in self.__attributes )
# FIELD_INFO = [ '>HHHH', namedtuple("FieldInfo", "access_flags name_index descriptor_index attributes_count") ]
class CreateFieldInfo :
"""Create a specific FieldInfo by given the name, the prototype of the "new" field"""
def __init__(self, class_manager, name, proto) :
self.__CM = class_manager
access_flags_value = proto[0]
type_value = proto[1]
self.__access_flags = INVERT_ACC_FIELD_FLAGS[ access_flags_value ]
self.__name_index = self.__CM.get_string_index( name )
if self.__name_index == -1 :
self.__name_index = self.__CM.add_string( name )
else :
bytecode.Exit("field %s is already present ...." % name)
self.__descriptor_index = self.__CM.add_string( type_value )
self.__attributes = []
def get_raw(self) :
buff = pack( FIELD_INFO[0], self.__access_flags, self.__name_index, self.__descriptor_index, len(self.__attributes) )
for i in self.__attributes :
buff += i.get_raw()
return buff
# METHOD_INFO = [ '>HHHH', namedtuple("MethodInfo", "access_flags name_index descriptor_index attributes_count") ]
class CreateMethodInfo :
"""Create a specific MethodInfo by given the name, the prototype and the code (into an human readable format) of the "new" method"""
def __init__(self, class_manager, name, proto, codes) :
self.__CM = class_manager
access_flags_value = proto[0]
return_value = proto[1]
arguments_value = proto[2]
self.__access_flags = INVERT_ACC_METHOD_FLAGS[ access_flags_value ]
self.__name_index = self.__CM.get_string_index( name )
if self.__name_index == -1 :
self.__name_index = self.__CM.add_string( name )
proto_final = "(" + arguments_value + ")" + return_value
self.__descriptor_index = self.__CM.add_string( proto_final )
self.__attributes = []
self.__attributes.append( CreateCodeAttributeInfo( self.__CM, codes ) )
def get_raw(self) :
buff = pack( METHOD_INFO[0], self.__access_flags, self.__name_index, self.__descriptor_index, len(self.__attributes) )
for i in self.__attributes :
buff += i.get_raw()
return buff
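# A minimal usage sketch (illustrative only, not part of the original module) : CreateMethodInfo
# expects a prototype of the form [ access_flags, return_descriptor, argument_descriptors ] and a
# list of human-readable bytecodes. The flag key, descriptors and opcode below are assumptions
# chosen for the example.
#
#   cm = jvm.get_class_manager()                       # 'jvm' is assumed to be a parsed JVMFormat
#   mi = CreateMethodInfo( cm, "doNothing",
#                          [ "ACC_PUBLIC", "V", "" ],  # proto : flags, return type, arguments
#                          [ [ "return" ] ] )          # codes : one opcode per entry
#   raw = mi.get_raw()                                 # serialized method_info structure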
class JBC :
"""JBC manages each bytecode with the value, name, raw buffer and special functions"""
# special --> ( r_function, v_function, r_buff, r_format, f_function )
def __init__(self, class_manager, op_name, raw_buff, special=None) :
self.__CM = class_manager
self.__op_name = op_name
self.__raw_buff = raw_buff
self.__special = special
self.__special_value = None
self._load()
def _load(self) :
if self.__special != None :
ntuple = namedtuple( self.__op_name, self.__special[2] )
x = ntuple._make( unpack( self.__special[3], self.__raw_buff[1:] ) )
if self.__special[4] == None :
self.__special_value = self.__special[0]( x )
else :
self.__special_value = getattr(self.__CM, self.__special[4])( self.__special[0]( x ) )
def reload(self, raw_buff) :
"""Reload the bytecode with a new raw buffer"""
self.__raw_buff = raw_buff
self._load()
def set_cm(self, cm) :
self.__CM = cm
def get_length(self) :
"""Return the length of the bytecode"""
return len( self.__raw_buff )
def get_raw(self) :
"""Return the current raw buffer of the bytecode"""
return self.__raw_buff
def get_name(self) :
"""Return the name of the bytecode"""
return self.__op_name
def get_operands(self) :
"""Return the operands of the bytecode"""
if isinstance( self.__special_value, list ):
if len(self.__special_value) == 1 :
return self.__special_value[0]
return self.__special_value
def get_formatted_operands(self) :
return []
def adjust_r(self, pos, pos_modif, len_modif) :
"""Adjust the bytecode (if necessary (in this cas the bytecode is a branch bytecode)) when a bytecode has been removed"""
# print self.__op_name, pos, pos_modif, len_modif, self.__special_value, type(pos), type(pos_modif), type(len_modif), type(self.__special_value)
if pos > pos_modif :
if (self.__special_value + pos) < (pos_modif) :
# print "MODIF +", self.__special_value, len_modif,
self.__special_value += len_modif
# print self.__special_value
self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )
elif pos < pos_modif :
if (self.__special_value + pos) > (pos_modif) :
# print "MODIF -", self.__special_value, len_modif,
self.__special_value -= len_modif
# print self.__special_value
self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )
def adjust_i(self, pos, pos_modif, len_modif) :
"""Adjust the bytecode (if necessary (in this cas the bytecode is a branch bytecode)) when a bytecode has been inserted"""
#print self.__op_name, pos, pos_modif, len_modif, self.__special_value, type(pos), type(pos_modif), type(len_modif), type(self.__special_value)
if pos > pos_modif :
if (self.__special_value + pos) < (pos_modif) :
# print "MODIF +", self.__special_value, len_modif,
self.__special_value -= len_modif
# print self.__special_value
self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )
elif pos < pos_modif :
if (self.__special_value + pos) > (pos_modif) :
# print "MODIF -", self.__special_value, len_modif,
self.__special_value += len_modif
# print self.__special_value
self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )
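# Note : adjust_r / adjust_i keep relative branch offsets consistent. When a bytecode is removed
# (or inserted) at offset 'pos_modif', every branch instruction located at 'pos' whose target
# crosses the modified offset has its signed offset shrunk (or grown) by 'len_modif', and its raw
# buffer is repacked with the new value.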
def show_buff(self, pos) :
buff = ""
if self.__special_value == None :
buff += self.__op_name
else :
if self.__op_name in BRANCH_JVM_OPCODES :
buff += "%s %s %s" % (self.__op_name, self.__special_value, self.__special_value + pos)
else :
buff += "%s %s" % (self.__op_name, self.__special_value)
return buff
def show(self, pos) :
"""Show the bytecode at a specific position
pos - the position in the bytecode list (integer)
"""
print self.show_buff( pos ),
class JavaCode :
"""JavaCode manages a list of bytecode to a specific method, by decoding a raw buffer and transform each bytecode into a JBC object"""
def __init__(self, class_manager, buff) :
self.__CM = class_manager
self.__raw_buff = buff
self.__bytecodes = []
self.__maps = []
self.__branches = []
i = 0
while i < len(self.__raw_buff) :
op_value = unpack( '>B', self.__raw_buff[i])[0]
if op_value in JAVA_OPCODES :
if len( JAVA_OPCODES[ op_value ] ) >= 2 :
# it's a fixed length opcode
if isinstance(JAVA_OPCODES[ op_value ][1], str) == True :
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
# it's a variable length opcode
else :
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_VARIABLE( i, op_value, self.__raw_buff[ i : ] )
len_format = calcsize(r_format)
raw_buff = self.__raw_buff[ i : i + 1 + len_format ]
jbc = JBC( class_manager, JAVA_OPCODES[ op_value ][0], raw_buff, ( r_function, v_function, r_buff, r_format, f_function ) )
self.__bytecodes.append( jbc )
i += len_format
else :
self.__bytecodes.append( JBC( class_manager, JAVA_OPCODES[ op_value ][0], self.__raw_buff[ i ] ) )
else :
bytecode.Exit( "op_value 0x%x is unknown" % op_value )
i += 1
# Create branch bytecodes list
idx = 0
nb = 0
for i in self.__bytecodes :
self.__maps.append( idx )
if i.get_name() in BRANCH_JVM_OPCODES :
self.__branches.append( nb )
idx += i.get_length()
nb += 1
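# Note : _patch_bytecodes rewrites the symbolic operands of the instructions below
# (class/method/field names, strings, integers) into real constant pool indexes, creating any
# missing pool entries through the ClassManager, and reloads each instruction's raw buffer with
# the resolved index.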
def _patch_bytecodes(self) :
methods = []
for i in self.__bytecodes :
if "invoke" in i.get_name() :
operands = i.get_operands()
methods.append( operands )
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands[0] )
new_name_and_type_index = self.__CM.create_name_and_type( operands[1], operands[2] )
self.__CM.create_method_ref( new_class_index, new_name_and_type_index )
value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *operands[0:] )
if value == -1 :
bytecode.Exit( "Unable to found method " + str(operands) )
raw_buff += pack(r_format, *v_function( value ) )
i.reload( raw_buff )
elif "anewarray" in i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands )
raw_buff += pack(r_format, *v_function( new_class_index ) )
i.reload( raw_buff )
elif "getstatic" == i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands[0] )
new_name_and_type_index = self.__CM.create_name_and_type( operands[1], operands[2] )
self.__CM.create_field_ref( new_class_index, new_name_and_type_index )
value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *operands[1:] )
if value == -1 :
bytecode.Exit( "Unable to found method " + str(operands) )
raw_buff += pack(r_format, *v_function( value ) )
i.reload( raw_buff )
elif "ldc" == i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
if operands[0] != "CONSTANT_Integer" and operands[0] != "CONSTANT_String" :
bytecode.Exit( "...." )
if operands[0] == "CONSTANT_Integer" :
new_int_index = self.__CM.create_integer( operands[1] )
raw_buff += pack(r_format, *v_function( new_int_index ) )
elif operands[0] == "CONSTANT_String" :
new_string_index = self.__CM.create_string( operands[1] )
raw_buff += pack(r_format, *v_function( new_string_index ) )
i.reload( raw_buff )
elif "new" == i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands )
raw_buff += pack(r_format, *v_function( new_class_index ) )
i.reload( raw_buff )
return methods
def get(self) :
"""
Return all bytecodes
@rtype : L{list}
"""
return self.__bytecodes
def get_raw(self) :
return ''.join(x.get_raw() for x in self.__bytecodes)
def show(self) :
"""
Display the code like a disassembler
"""
nb = 0
for i in self.__bytecodes :
print nb, self.__maps[nb],
i.show( self.__maps[nb] )
print
nb += 1
def pretty_show(self, m_a) :
"""
Display the code like a disassembler but with instructions' links
"""
bytecode.PrettyShow( m_a.basic_blocks.gets() )
bytecode.PrettyShowEx( m_a.exceptions.gets() )
def get_relative_idx(self, idx) :
"""
Return the relative index, given an offset in the code
@param idx : an offset in the code
@rtype : the relative index in the code, i.e. the position of the bytecode in the list
"""
n = 0
x = 0
for i in self.__bytecodes :
#print n, idx
if n == idx :
return x
n += i.get_length()
x += 1
return -1
def get_at(self, idx) :
"""
Return a specific bytecode at an index
@param idx : the index of a bytecode
@rtype : L{JBC}
"""
return self.__bytecodes[ idx ]
def remove_at(self, idx) :
"""
Remove bytecode at a specific index
@param idx : the index to remove the bytecode
@rtype : the length of the removed bytecode
"""
val = self.__bytecodes[idx]
val_m = self.__maps[idx]
# Remove the index if it's in our branch list
if idx in self.__branches :
self.__branches.remove( idx )
# Adjust each branch
for i in self.__branches :
self.__bytecodes[i].adjust_r( self.__maps[i], val_m, val.get_length() )
# Remove it !
self.__maps.pop(idx)
self.__bytecodes.pop(idx)
# Adjust branch and map list
self._adjust_maps( val_m, val.get_length() * -1 )
self._adjust_branches( idx, -1 )
return val.get_length()
def _adjust_maps(self, val, size) :
nb = 0
for i in self.__maps :
if i > val :
self.__maps[ nb ] = i + size
nb = nb + 1
def _adjust_maps_i(self, val, size) :
nb = 0
x = 0
for i in self.__maps :
if i == val :
x+=1
if x == 2 :
self.__maps[ nb ] = i + size
if i > val :
self.__maps[ nb ] = i + size
nb = nb + 1
def _adjust_branches(self, val, size) :
nb = 0
for i in self.__branches :
if i > val :
self.__branches[ nb ] = i + size
nb += 1
def insert_at(self, idx, byte_code) :
"""
Insert bytecode at a specific index
@param idx : the index to insert the bytecode
@param bytecode : a list which represents the bytecode
@rtype : the length of the inserted bytecode
"""
# Get the op_value and add it to the raw_buff
op_name = byte_code[0]
op_value = INVERT_JAVA_OPCODES[ op_name ]
raw_buff = pack( '>B', op_value )
new_jbc = None
# If it's an op_value with args, we must handle that !
if len( JAVA_OPCODES[ op_value ] ) > 1 :
# Find information about the op_value
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
# Special values for this op_value (advanced bytecode)
if len( JAVA_OPCODES[ op_value ] ) == 6 :
value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *byte_code[1:] )
if value == -1 :
bytecode.Exit( "Unable to found " + str(byte_code[1:]) )
raw_buff += pack(r_format, *v_function( value ) )
else :
raw_buff += pack(r_format, *v_function( *byte_code[1:] ) )
new_jbc = JBC(self.__CM, op_name, raw_buff, ( r_function, v_function, r_buff, r_format, f_function ) )
else :
new_jbc = JBC(self.__CM, op_name, raw_buff)
# Adjust each branch with the new insertion
val_m = self.__maps[ idx ]
for i in self.__branches :
self.__bytecodes[i].adjust_i( self.__maps[i], val_m, new_jbc.get_length() )
# Insert the new bytecode at the correct index
# Adjust maps + branches
self.__bytecodes.insert( idx, new_jbc )
self.__maps.insert( idx, val_m )
self._adjust_maps_i( val_m, new_jbc.get_length() )
self._adjust_branches( idx, 1 )
# Add it to the branches if it's a correct op_value
if new_jbc.get_name() in BRANCH_JVM_OPCODES :
self.__branches.append( idx )
# FIXME
# modify the exception table
# modify tableswitch and lookupswitch instructions
# return the length of the raw_buff
return len(raw_buff)
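# A minimal usage sketch (illustrative only) : bytecodes are given as lists whose first element is
# the opcode name, followed by its operands. The opcode and index below are assumptions chosen for
# the example.
#
#   jc.insert_at( 1, [ "bipush", 2 ] )   # 'jc' is assumed to be a JavaCode object
#   jc.show()
#   jc.remove_at( 1 )                    # undo the insertion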
def remplace_at(self, idx, bytecode) :
"""
Replace the bytecode at a specific index by another bytecode (replace = remove + insert)
@param idx : the index of the bytecode to replace
@param bytecode : a list which represents the bytecode
@rtype : the length of the inserted bytecode
"""
self.remove_at(idx)
size = self.insert_at(idx, bytecode)
return size
def set_cm(self, cm) :
self.__CM = cm
for i in self.__bytecodes :
i.set_cm( cm )
class BasicAttribute(object) :
def __init__(self) :
self.__attributes = []
def get_attributes(self) :
return self.__attributes
def set_cm(self, cm) :
self.__CM = cm
class CodeAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(CodeAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 max_stack;
# u2 max_locals;
# u4 code_length;
# u1 code[code_length];
self.low_struct = SVs( CODE_LOW_STRUCT[0], CODE_LOW_STRUCT[1], buff.read( calcsize(CODE_LOW_STRUCT[0]) ) )
self.__code = JavaCode( class_manager, buff.read( self.low_struct.get_value().code_length ) )
# u2 exception_table_length;
self.exception_table_length = SV( '>H', buff.read(2) )
# { u2 start_pc;
# u2 end_pc;
# u2 handler_pc;
# u2 catch_type;
# } exception_table[exception_table_length];
self.__exception_table = []
for i in range(0, self.exception_table_length.get_value()) :
et = SVs( EXCEPTION_TABLE[0], EXCEPTION_TABLE[1], buff.read( calcsize(EXCEPTION_TABLE[0]) ) )
self.__exception_table.append( et )
# u2 attributes_count;
self.attributes_count = SV( '>H', buff.read(2) )
# attribute_info attributes[attributes_count];
self.__attributes = []
for i in range(0, self.attributes_count.get_value()) :
ai = AttributeInfo( self.__CM, buff )
self.__attributes.append( ai )
def get_attributes(self) :
return self.__attributes
def get_exceptions(self) :
return self.__exception_table
def get_raw(self) :
return self.low_struct.get_value_buff() + \
self.__code.get_raw() + \
self.exception_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.__exception_table) + \
self.attributes_count.get_value_buff() + \
''.join(x.get_raw() for x in self.__attributes)
def get_length(self) :
return self.low_struct.get_value().code_length
def get_max_stack(self) :
return self.low_struct.get_value().max_stack
def get_max_locals(self) :
return self.low_struct.get_value().max_locals
def get_local_variables(self) :
for i in self.__attributes :
if i.get_name() == "StackMapTable" :
return i.get_item().get_local_variables()
return []
def get_bc(self) :
return self.__code
# FIXME : show* --> add exceptions
def show_info(self) :
print "!" * 70
print self.low_struct.get_value()
bytecode._Print( "ATTRIBUTES_COUNT", self.attributes_count.get_value() )
for i in self.__attributes :
i.show()
print "!" * 70
def _begin_show(self) :
print "!" * 70
print self.low_struct.get_value()
def _end_show(self) :
bytecode._Print( "ATTRIBUTES_COUNT", self.attributes_count.get_value() )
for i in self.__attributes :
i.show()
print "!" * 70
def show(self) :
self._begin_show()
self.__code.show()
self._end_show()
def pretty_show(self, m_a) :
self._begin_show()
self.__code.pretty_show(m_a)
self._end_show()
def _patch_bytecodes(self) :
return self.__code._patch_bytecodes()
def remplace_at(self, idx, bytecode) :
size = self.__code.remplace_at(idx, bytecode)
# Adjust the length of our bytecode
self.low_struct.set_value( { "code_length" : self.low_struct.get_value().code_length + size } )
def remove_at(self, idx) :
size = self.__code.remove_at(idx)
# Adjust the length of our bytecode
self.low_struct.set_value( { "code_length" : self.low_struct.get_value().code_length - size } )
def removes_at(self, l_idx) :
i = 0
while i < len(l_idx) :
self.remove_at( l_idx[i] )
j = i + 1
while j < len(l_idx) :
if l_idx[j] > l_idx[i] :
l_idx[j] -= 1
j += 1
i += 1
def inserts_at(self, idx, l_bc) :
# self.low_struct.set_value( { "max_stack" : self.low_struct.get_value().max_stack + 2 } )
# print self.low_struct.get_value()
total_size = 0
for i in l_bc :
size = self.insert_at( idx, i )
idx += 1
total_size += size
return total_size
def insert_at(self, idx, bytecode) :
size = self.__code.insert_at(idx, bytecode)
# Adjust the length of our bytecode
self.low_struct.set_value( { "code_length" : self.low_struct.get_value().code_length + size } )
return size
def get_relative_idx(self, idx) :
return self.__code.get_relative_idx(idx)
def get_at(self, idx) :
return self.__code.get_at(idx)
def gets_at(self, l_idx) :
return [ self.__code.get_at(i) for i in l_idx ]
def set_cm(self, cm) :
self.__CM = cm
for i in self.__attributes :
i.set_cm( cm )
self.__code.set_cm( cm )
def _fix_attributes(self, new_cm) :
for i in self.__attributes :
i._fix_attributes( new_cm )
class SourceFileAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SourceFileAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 sourcefile_index;
self.sourcefile_index = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.sourcefile_index.get_value_buff()
def show(self) :
print self.sourcefile_index
class LineNumberTableAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(LineNumberTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 line_number_table_length;
# { u2 start_pc;
# u2 line_number;
# } line_number_table[line_number_table_length];
self.line_number_table_length = SV( '>H', buff.read( 2 ) )
self.__line_number_table = []
for i in range(0, self.line_number_table_length.get_value()) :
lnt = SVs( LINE_NUMBER_TABLE[0], LINE_NUMBER_TABLE[1], buff.read( 4 ) )
self.__line_number_table.append( lnt )
def get_raw(self) :
return self.line_number_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.__line_number_table)
def get_line_number_table(self) :
return self.__line_number_table
def show(self) :
bytecode._Print("LINE_NUMBER_TABLE_LENGTH", self.line_number_table_length.get_value())
for x in self.__line_number_table :
print "\t", x.get_value()
def _fix_attributes(self, new_cm) :
pass
class LocalVariableTableAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(LocalVariableTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 local_variable_table_length;
# { u2 start_pc;
# u2 length;
# u2 name_index;
# u2 descriptor_index;
# u2 index;
# } local_variable_table[local_variable_table_length];
self.local_variable_table_length = SV( '>H', buff.read(2) )
self.local_variable_table = []
for i in range(0, self.local_variable_table_length.get_value()) :
lvt = SVs( LOCAL_VARIABLE_TABLE[0], LOCAL_VARIABLE_TABLE[1], buff.read( calcsize(LOCAL_VARIABLE_TABLE[0]) ) )
self.local_variable_table.append( lvt )
def get_raw(self) :
return self.local_variable_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.local_variable_table)
def show(self) :
print "LocalVariableTable", self.local_variable_table_length.get_value()
for x in self.local_variable_table :
print x.get_value()
class LocalVariableTypeTableAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(LocalVariableTypeTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 local_variable_type_table_length;
# { u2 start_pc;
# u2 length;
# u2 name_index;
# u2 signature_index;
# u2 index;
# } local_variable_type_table[local_variable_type_table_length];
self.local_variable_type_table_length = SV( '>H', buff.read(2) )
self.local_variable_type_table = []
for i in range(0, self.local_variable_type_table_length.get_value()) :
lvtt = SVs( LOCAL_VARIABLE_TYPE_TABLE[0], LOCAL_VARIABLE_TYPE_TABLE[1], buff.read( calcsize(LOCAL_VARIABLE_TYPE_TABLE[0]) ) )
self.local_variable_type_table.append( lvtt )
def get_raw(self) :
return self.local_variable_type_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.local_variable_type_table)
def show(self) :
print "LocalVariableTypeTable", self.local_variable_type_table_length.get_value()
for x in self.local_variable_type_table :
print x.get_value()
class SourceDebugExtensionAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SourceDebugExtensionAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u1 debug_extension[attribute_length];
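# FIXME : attribute_length is read by the enclosing AttributeInfo and is not stored on this
# object, so the read below cannot work as written.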
self.debug_extension = buff.read( self.attribute_length )
def get_raw(self) :
return self.debug_extension
def show(self) :
print "SourceDebugExtension", self.debug_extension.get_value()
class DeprecatedAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(DeprecatedAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
def get_raw(self) :
return ''
def show(self) :
print "Deprecated"
class SyntheticAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SyntheticAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
def get_raw(self) :
return ''
def show(self) :
print "Synthetic"
class SignatureAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SignatureAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 signature_index;
self.signature_index = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.signature_index.get_value_buff()
def show(self) :
print "Signature", self.signature_index.get_value()
class RuntimeVisibleAnnotationsAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(RuntimeVisibleAnnotationsAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 num_annotations;
# annotation annotations[num_annotations];
self.num_annotations = SV( '>H', buff.read(2) )
self.annotations = []
for i in range(0, self.num_annotations.get_value()) :
self.annotations.append( Annotation(cm, buff) )
def get_raw(self) :
return self.num_annotations.get_value_buff() + \
''.join(x.get_raw() for x in self.annotations)
def show(self) :
print "RuntimeVisibleAnnotations", self.num_annotations.get_value()
for i in self.annotations :
i.show()
class RuntimeInvisibleAnnotationsAttribute(RuntimeVisibleAnnotationsAttribute) :
def show(self) :
print "RuntimeInvisibleAnnotations", self.num_annotations.get_value()
for i in self.annotations :
i.show()
class RuntimeVisibleParameterAnnotationsAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(RuntimeVisibleParameterAnnotationsAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u1 num_parameters;
#{
# u2 num_annotations;
# annotation annotations[num_annotations];
#} parameter_annotations[num_parameters];
self.num_parameters = SV( '>H', buff.read(2) )
self.parameter_annotations = []
for i in range(0, self.num_parameters.get_value()) :
self.parameter_annotations.append( ParameterAnnotation( cm, buff ) )
def get_raw(self) :
return self.num_parameters.get_value_buff() + \
''.join(x.get_raw() for x in self.parameter_annotations)
def show(self) :
print "RuntimeVisibleParameterAnnotations", self.num_parameters.get_value()
for i in self.parameter_annotations :
i.show()
class RuntimeInvisibleParameterAnnotationsAttribute(RuntimeVisibleParameterAnnotationsAttribute) :
def show(self) :
print "RuntimeVisibleParameterAnnotations", self.num_annotations.get_value()
for i in self.parameter_annotations :
i.show()
class ParameterAnnotation :
def __init__(self, cm, buff) :
# u2 num_annotations;
# annotation annotations[num_annotations];
self.num_annotations = SV( '>H', buff.read(2) )
self.annotations = []
for i in range(0, self.num_annotations.get_value()) :
self.annotations.append( Annotation( cm, buff ) )
def get_raw(self) :
return self.num_annotations.get_value_buff() + \
''.join(x.get_raw() for x in self.annotations)
def show(self) :
print "ParameterAnnotation", self.num_annotations.get_value()
for i in self.annotations :
i.show()
class AnnotationDefaultAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(AnnotationDefaultAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# element_value default_value;
self.default_value = ElementValue( cm, buff )
def get_raw(self) :
return self.default_value.get_raw()
def show(self) :
print "AnnotationDefault"
self.default_value.show()
class Annotation :
def __init__(self, cm, buff) :
# u2 type_index;
# u2 num_element_value_pairs;
# { u2 element_name_index;
# element_value value;
# } element_value_pairs[num_element_value_pairs]
self.type_index = SV( '>H', buff.read(2) )
self.num_element_value_pairs = SV( '>H', buff.read(2) )
self.element_value_pairs = []
for i in range(0, self.num_element_value_pairs.get_value()) :
self.element_value_pairs.append( ElementValuePair(cm, buff) )
def get_raw(self) :
return self.type_index.get_value_buff() + self.num_element_value_pairs.get_value_buff() + \
''.join(x.get_raw() for x in self.element_value_pairs)
def show(self) :
print "Annotation", self.type_index.get_value(), self.num_element_value_pairs.get_value()
for i in self.element_value_pairs :
i.show()
class ElementValuePair :
def __init__(self, cm, buff) :
# u2 element_name_index;
# element_value value;
self.element_name_index = SV( '>H', buff.read(2) )
self.value = ElementValue(cm, buff)
def get_raw(self) :
return self.element_name_index.get_value_buff() + \
self.value.get_raw()
def show(self) :
print "ElementValuePair", self.element_name_index.get_value()
self.value.show()
ENUM_CONST_VALUE = [ '>HH', namedtuple("EnumConstValue", "type_name_index const_name_index") ]
class ElementValue :
def __init__(self, cm, buff) :
# u1 tag;
# union {
# u2 const_value_index;
# {
# u2 type_name_index;
# u2 const_name_index;
# } enum_const_value;
# u2 class_info_index;
# annotation annotation_value;
# {
# u2 num_values;
# element_value values[num_values];
# } array_value;
# } value;
self.tag = SV( '>B', buff.read(1) )
tag = chr( self.tag.get_value() )
if tag in ('B', 'C', 'D', 'F', 'I', 'J', 'S', 'Z', 's') :
self.value = SV( '>H', buff.read(2) )
elif tag == 'e' :
self.value = SVs( ENUM_CONST_VALUE[0], ENUM_CONST_VALUE[1], buff.read( calcsize(ENUM_CONST_VALUE[0]) ) )
elif tag == 'c' :
self.value = SV( '>H', buff.read(2) )
elif tag == '@' :
self.value = Annotation( cm, buff )
elif tag == '[' :
self.value = ArrayValue( cm, buff )
else :
bytecode.Exit( "tag %c not in VERIFICATION_TYPE_INFO" % self.tag.get_value() )
def get_raw(self) :
if isinstance(self.value, SV) or isinstance(self.value, SVs) :
return self.tag.get_value_buff() + self.value.get_value_buff()
return self.tag.get_value_buff() + self.value.get_raw()
def show(self) :
print "ElementValue", self.tag.get_value()
if isinstance(self.value, SV) or isinstance(self.value, SVs) :
print self.value.get_value()
else :
self.value.show()
class ArrayValue :
def __init__(self, cm, buff) :
# u2 num_values;
# element_value values[num_values];
self.num_values = SV( '>H', buff.read(2) )
self.values = []
for i in range(0, self.num_values.get_value()) :
self.values.append( ElementValue(cm, buff) )
def get_raw(self) :
return self.num_values.get_value_buff() + \
''.join(x.get_raw() for x in self.values)
def show(self) :
print "ArrayValue", self.num_values.get_value()
for i in self.values :
i.show()
class ExceptionsAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(ExceptionsAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 number_of_exceptions;
# u2 exception_index_table[number_of_exceptions];
self.number_of_exceptions = SV( '>H', buff.read(2) )
self.__exception_index_table = []
for i in range(0, self.number_of_exceptions.get_value()) :
self.__exception_index_table.append( SV( '>H', buff.read(2) ) )
def get_raw(self) :
return self.number_of_exceptions.get_value_buff() + ''.join(x.get_value_buff() for x in self.__exception_index_table)
def get_exception_index_table(self) :
return self.__exception_index_table
def show(self) :
print "Exceptions", self.number_of_exceptions.get_value()
for i in self.__exception_index_table :
print "\t", i
class VerificationTypeInfo :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
tag = SV( '>B', buff.read_b(1) ).get_value()
if tag not in VERIFICATION_TYPE_INFO :
bytecode.Exit( "tag not in VERIFICATION_TYPE_INFO" )
format = VERIFICATION_TYPE_INFO[ tag ][1]
self.format = SVs( format, VERIFICATION_TYPE_INFO[ tag ][2], buff.read( calcsize( format ) ) )
def get_raw(self) :
return self.format.get_value_buff()
def show(self) :
general_format = self.format.get_value()
if len( VERIFICATION_TYPE_INFO[ general_format.tag ] ) > 3 :
print general_format,
for (i,j) in VERIFICATION_TYPE_INFO[ general_format.tag ][3] :
print getattr(self.__CM, j)( getattr(general_format, i) )
else :
print general_format
def _fix_attributes(self, new_cm) :
general_format = self.format.get_value()
if len( VERIFICATION_TYPE_INFO[ general_format.tag ] ) > 3 :
for (i,j) in VERIFICATION_TYPE_INFO[ general_format.tag ][3] :
# Fix the first object which is the current class
if getattr(self.__CM, j)( getattr(general_format, i) )[0] == self.__CM.get_this_class_name() :
self.format.set_value( { "cpool_index" : new_cm.get_this_class() } )
# Fix other objects
else :
new_class_index = new_cm.create_class( getattr(self.__CM, j)( getattr(general_format, i) )[0] )
self.format.set_value( { "cpool_index" : new_class_index } )
def set_cm(self, cm) :
self.__CM = cm
class FullFrame :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = FULL_FRAME; /* 255 */
# u2 offset_delta;
# u2 number_of_locals;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
self.number_of_locals = SV( '>H', buff.read(2) )
# verification_type_info locals[number_of_locals];
self.__locals = []
for i in range(0, self.number_of_locals.get_value()) :
self.__locals.append( VerificationTypeInfo( self.__CM, buff ) )
# u2 number_of_stack_items;
self.number_of_stack_items = SV( '>H', buff.read(2) )
# verification_type_info stack[number_of_stack_items];
self.__stack = []
for i in range(0, self.number_of_stack_items.get_value()) :
self.__stack.append( VerificationTypeInfo( self.__CM, buff ) )
def get_locals(self) :
return self.__locals
def get_raw(self) :
return self.frame_type.get_value_buff() + \
self.offset_delta.get_value_buff() + \
self.number_of_locals.get_value_buff() + \
''.join(x.get_raw() for x in self.__locals) + \
self.number_of_stack_items.get_value_buff() + \
''.join(x.get_raw() for x in self.__stack)
def show(self) :
print "#" * 60
bytecode._Print("\tFULL_FRAME", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
bytecode._Print("\tNUMBER_OF_LOCALS", self.number_of_locals.get_value())
for i in self.__locals :
i.show()
bytecode._Print("\tNUMBER_OF_STACK_ITEMS", self.number_of_stack_items.get_value())
for i in self.__stack :
i.show()
print "#" * 60
def _fix_attributes(self, new_cm) :
for i in self.__locals :
i._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
for i in self.__locals :
i.set_cm( cm )
class ChopFrame :
def __init__(self, buff) :
# u1 frame_type=CHOP; /* 248-250 */
# u2 offset_delta;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.frame_type.get_value_buff() + self.offset_delta.get_value_buff()
def show(self) :
print "#" * 60
bytecode._Print("\tCHOP_FRAME", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
print "#" * 60
def _fix_attributes(self, cm) :
pass
def set_cm(self, cm) :
pass
class SameFrame :
def __init__(self, buff) :
# u1 frame_type = SAME;/* 0-63 */
self.frame_type = SV( '>B', buff.read(1) )
def get_raw(self) :
return self.frame_type.get_value_buff()
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_FRAME", self.frame_type.get_value())
print "#" * 60
def _fix_attributes(self, new_cm) :
pass
def set_cm(self, cm) :
pass
class SameLocals1StackItemFrame :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = SAME_LOCALS_1_STACK_ITEM;/* 64-127 */
# verification_type_info stack[1];
self.frame_type = SV( '>B', buff.read(1) )
self.stack = VerificationTypeInfo( self.__CM, buff )
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_LOCALS_1_STACK_ITEM_FRAME", self.frame_type.get_value())
self.stack.show()
print "#" * 60
def get_raw(self) :
return self.frame_type.get_value_buff() + self.stack.get_raw()
def _fix_attributes(self, new_cm) :
pass
def set_cm(self, cm) :
self.__CM = cm
class SameLocals1StackItemFrameExtended :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */
# u2 offset_delta;
# verification_type_info stack[1];
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
self.stack = VerificationTypeInfo( self.__CM, buff )
def get_raw(self) :
return self.frame_type.get_value_buff() + self.offset_delta.get_value_buff() + self.stack.get_raw()
def _fix_attributes(self, new_cm) :
pass
def set_cm(self, cm) :
self.__CM = cm
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
self.stack.show()
print "#" * 60
class SameFrameExtended :
def __init__(self, buff) :
# u1 frame_type = SAME_FRAME_EXTENDED;/* 251*/
# u2 offset_delta;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.frame_type.get_value_buff() + self.offset_delta.get_value_buff()
def _fix_attributes(self, cm) :
pass
def set_cm(self, cm) :
pass
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_FRAME_EXTENDED", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
print "#" * 60
class AppendFrame :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = APPEND; /* 252-254 */
# u2 offset_delta;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
# verification_type_info locals[frame_type -251];
self.__locals = []
k = self.frame_type.get_value() - 251
for i in range(0, k) :
self.__locals.append( VerificationTypeInfo( self.__CM, buff ) )
def get_locals(self) :
return self.__locals
def show(self) :
print "#" * 60
bytecode._Print("\tAPPEND_FRAME", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
for i in self.__locals :
i.show()
print "#" * 60
def get_raw(self) :
return self.frame_type.get_value_buff() + \
self.offset_delta.get_value_buff() + \
''.join(x.get_raw() for x in self.__locals)
def _fix_attributes(self, new_cm) :
for i in self.__locals :
i._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
for i in self.__locals :
i.set_cm( cm )
class StackMapTableAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(StackMapTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length
# u2 number_of_entries;
self.number_of_entries = SV( '>H', buff.read(2) )
# stack_map_frame entries[number_of_entries];
self.__entries = []
for i in range(0, self.number_of_entries.get_value()) :
frame_type = SV( '>B', buff.read_b(1) ).get_value()
if frame_type >= 0 and frame_type <= 63 :
self.__entries.append( SameFrame( buff ) )
elif frame_type >= 64 and frame_type <= 127 :
self.__entries.append( SameLocals1StackItemFrame( self.__CM, buff ) )
elif frame_type == 247 :
self.__entries.append( SameLocals1StackItemFrameExtended( self.__CM, buff ) )
elif frame_type >= 248 and frame_type <= 250 :
self.__entries.append( ChopFrame( buff ) )
elif frame_type == 251 :
self.__entries.append( SameFrameExtended( buff ) )
elif frame_type >= 252 and frame_type <= 254 :
self.__entries.append( AppendFrame( self.__CM, buff ) )
elif frame_type == 255 :
self.__entries.append( FullFrame( self.__CM, buff ) )
else :
bytecode.Exit( "Frame type %d is unknown" % frame_type )
def get_entries(self) :
return self.__entries
def get_local_variables(self) :
for i in self.__entries :
if isinstance(i, FullFrame) :
return i.get_locals()
return []
def get_raw(self) :
return self.number_of_entries.get_value_buff() + \
''.join(x.get_raw() for x in self.__entries )
def show(self) :
bytecode._Print("NUMBER_OF_ENTRIES", self.number_of_entries.get_value())
for i in self.__entries :
i.show()
def _fix_attributes(self, new_cm) :
for i in self.__entries :
i._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
for i in self.__entries :
i.set_cm( cm )
class InnerClassesDesc :
def __init__(self, class_manager, buff) :
INNER_CLASSES_FORMAT = [ ">HHHH", "inner_class_info_index outer_class_info_index inner_name_index inner_class_access_flags" ]
self.__CM = class_manager
self.__raw_buff = buff.read( calcsize( INNER_CLASSES_FORMAT[0] ) )
self.format = SVs( INNER_CLASSES_FORMAT[0], namedtuple( "InnerClassesFormat", INNER_CLASSES_FORMAT[1] ), self.__raw_buff )
def show(self) :
print self.format
def get_raw(self) :
return self.format.get_value_buff()
def set_cm(self, cm) :
self.__CM = cm
class InnerClassesAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(InnerClassesAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length
# u2 number_of_classes;
self.number_of_classes = SV( '>H', buff.read(2) )
# { u2 inner_class_info_index;
# u2 outer_class_info_index;
# u2 inner_name_index;
# u2 inner_class_access_flags;
# } classes[number_of_classes];
self.__classes = []
for i in range(0, self.number_of_classes.get_value()) :
self.__classes.append( InnerClassesDesc( self.__CM, buff ) )
def get_classes(self) :
return self.__classes
def show(self) :
print self.number_of_classes
for i in self.__classes :
i.show()
def set_cm(self, cm) :
self.__CM = cm
for i in self.__classes :
i.set_cm( cm )
def get_raw(self) :
return self.number_of_classes.get_value_buff() + \
''.join(x.get_raw() for x in self.__classes)
class ConstantValueAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(ConstantValueAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 constantvalue_index;
self.constantvalue_index = SV( '>H', buff.read(2) )
def show(self) :
print self.constantvalue_index
def set_cm(self, cm) :
self.__CM = cm
def get_raw(self) :
return self.constantvalue_index.get_value_buff()
class EnclosingMethodAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
ENCLOSING_METHOD_FORMAT = [ '>HH', "class_index method_index" ]
self.__CM = class_manager
super(EnclosingMethodAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 class_index
# u2 method_index;
self.__raw_buff = buff.read( calcsize( ENCLOSING_METHOD_FORMAT[0] ) )
self.format = SVs( ENCLOSING_METHOD_FORMAT[0], namedtuple( "EnclosingMethodFormat", ENCLOSING_METHOD_FORMAT[1] ), self.__raw_buff )
def show(self) :
print self.format
def set_cm(self, cm) :
self.__CM = cm
def get_raw(self) :
return self.format.get_value_buff()
ATTRIBUTE_INFO_DESCR = {
"Code" : CodeAttribute,
"Deprecated" : DeprecatedAttribute,
"SourceFile" : SourceFileAttribute,
"Exceptions" : ExceptionsAttribute,
"LineNumberTable" : LineNumberTableAttribute,
"LocalVariableTable" : LocalVariableTableAttribute,
"LocalVariableTypeTable" : LocalVariableTypeTableAttribute,
"StackMapTable" : StackMapTableAttribute,
"InnerClasses" : InnerClassesAttribute,
"ConstantValue" : ConstantValueAttribute,
"EnclosingMethod" : EnclosingMethodAttribute,
"Signature" : SignatureAttribute,
"Synthetic" : SyntheticAttribute,
"SourceDebugExtension" : SourceDebugExtensionAttribute,
"RuntimeVisibleAnnotations" : RuntimeVisibleAnnotationsAttribute,
"RuntimeInvisibleAnnotations" : RuntimeInvisibleAnnotationsAttribute,
"RuntimeVisibleParameterAnnotations" : RuntimeVisibleParameterAnnotationsAttribute,
"RuntimeInvisibleParameterAnnotations" : RuntimeInvisibleParameterAnnotationsAttribute,
"AnnotationDefault" : AnnotationDefaultAttribute,
}
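# ATTRIBUTE_INFO_DESCR maps the attribute name (resolved from the constant pool) to the class used
# to parse its payload ; AttributeInfo below uses it to dispatch, and exits on any attribute name
# that is not listed.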
class AttributeInfo :
"""AttributeInfo manages each attribute info (Code, SourceFile ....)"""
def __init__(self, class_manager, buff) :
self.__CM = class_manager
self.__raw_buff = buff.read( calcsize( ATTRIBUTE_INFO[0] ) )
self.format = SVs( ATTRIBUTE_INFO[0], ATTRIBUTE_INFO[1], self.__raw_buff )
self.__name = self.__CM.get_string( self.format.get_value().attribute_name_index )
try :
self._info = ATTRIBUTE_INFO_DESCR[ self.__name ](self.__CM, buff)
except KeyError, ke :
bytecode.Exit( "AttributeInfo %s doesn't exit" % self.__name )
def get_item(self) :
"""Return the specific attribute info"""
return self._info
def get_name(self) :
"""Return the name of the attribute"""
return self.__name
def get_raw(self) :
v1 = self.format.get_value().attribute_length
v2 = len(self._info.get_raw())
if v1 != v2 :
self.set_attribute_length( v2 )
return self.format.get_value_buff() + self._info.get_raw()
def get_attribute_name_index(self) :
return self.format.get_value().attribute_name_index
def set_attribute_name_index(self, value) :
self.format.set_value( { "attribute_name_index" : value } )
def set_attribute_length(self, value) :
self.format.set_value( { "attribute_length" : value } )
def get_attributes(self) :
return self.format
def _fix_attributes(self, new_cm) :
self._info._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
self._info.set_cm( cm )
def show(self) :
print self.format, self.__name
if self._info != None :
self._info.show()
def pretty_show(self, m_a) :
print self.format, self.__name
if self._info != None :
if isinstance(self._info, CodeAttribute) :
self._info.pretty_show(m_a)
else :
self._info.show()
class ClassManager :
"""ClassManager can be used by all classes to get more information"""
def __init__(self, constant_pool, constant_pool_count) :
self.constant_pool = constant_pool
self.constant_pool_count = constant_pool_count
self.__this_class = None
def get_value(self, idx) :
name = self.get_item(idx[0]).get_name()
if name == "CONSTANT_Integer" :
return [ name, self.get_item(idx[0]).get_format().get_value().bytes ]
elif name == "CONSTANT_String" :
return [ name, self.get_string( self.get_item(idx[0]).get_format().get_value().string_index ) ]
elif name == "CONSTANT_Class" :
return [ name, self.get_class( idx[0] ) ]
elif name == "CONSTANT_Fieldref" :
return [ name, self.get_field( idx[0] ) ]
elif name == "CONSTANT_Float" :
return [ name, self.get_item(idx[0]).get_format().get_value().bytes ]
bytecode.Exit( "get_value not yet implemented for %s" % name )
def get_item(self, idx) :
return self.constant_pool[ idx - 1]
def get_interface(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_InterfaceMethodref" :
return []
class_idx = self.get_item(idx).get_class_index()
name_and_type_idx = self.get_item(idx).get_name_and_type_index()
return [ self.get_string( self.get_item(class_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
]
def get_interface_index(self, class_name, name, descriptor) :
raise("ooo")
def get_method(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_Methodref" :
return []
class_idx = self.get_item(idx).get_class_index()
name_and_type_idx = self.get_item(idx).get_name_and_type_index()
return [ self.get_string( self.get_item(class_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
]
def get_method_index(self, class_name, name, descriptor) :
idx = 1
for i in self.constant_pool :
res = self.get_method( idx )
if res != [] :
m_class_name, m_name, m_descriptor = res
if m_class_name == class_name and m_name == name and m_descriptor == descriptor :
return idx
idx += 1
return -1
def get_field(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_Fieldref" :
return []
class_idx = self.get_item(idx).get_class_index()
name_and_type_idx = self.get_item(idx).get_name_and_type_index()
return [ self.get_string( self.get_item(class_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
]
def get_field_index(self, name, descriptor) :
idx = 1
for i in self.constant_pool :
res = self.get_field( idx )
if res != [] :
_, m_name, m_descriptor = res
if m_name == name and m_descriptor == descriptor :
return idx
idx += 1
return -1
def get_class(self, idx) :
if self.get_item(idx).get_name() != "CONSTANT_Class" :
return []
return [ self.get_string( self.get_item(idx).get_name_index() ) ]
def get_array_type(self, idx) :
return ARRAY_TYPE[ idx[0] ]
def get_string_index(self, name) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Utf8" :
if i.get_bytes() == name :
return idx
idx += 1
return -1
def get_integer_index(self, value) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Integer" :
if i.get_format().get_value().bytes == value :
return idx
idx += 1
return -1
def get_cstring_index(self, value) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_String" :
if self.get_string( i.get_format().get_value().string_index ) == value :
return idx
idx += 1
return -1
def get_name_and_type_index(self, name_method_index, descriptor_method_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_NameAndType" :
value = i.get_format().get_value()
if value.name_index == name_method_index and value.descriptor_index == descriptor_method_index :
return idx
idx += 1
return -1
def get_class_by_index(self, name_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Class" :
value = i.get_format().get_value()
if value.name_index == name_index :
return idx
idx += 1
return -1
def get_method_ref_index(self, new_class_index, new_name_and_type_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Methodref" :
value = i.get_format().get_value()
if value.class_index == new_class_index and value.name_and_type_index == new_name_and_type_index :
return idx
idx += 1
return -1
def get_field_ref_index(self, new_class_index, new_name_and_type_index) :
idx = 1
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Fieldref" :
value = i.get_format().get_value()
if value.class_index == new_class_index and value.name_and_type_index == new_name_and_type_index :
return idx
idx += 1
return -1
def get_class_index(self, method_name) :
idx = 1
for i in self.constant_pool :
res = self.get_method( idx )
if res != [] :
_, name, _ = res
if name == method_name :
return i.get_class_index()
idx += 1
return -1
def get_class_index2(self, class_name) :
idx = 1
for i in self.constant_pool :
res = self.get_class( idx )
if res != [] :
name = res[0]
if name == class_name :
return idx
idx += 1
return -1
def get_used_fields(self) :
l = []
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Fieldref" :
l.append( i )
return l
def get_used_methods(self) :
l = []
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Methodref" :
l.append( i )
return l
def get_string(self, idx) :
if self.constant_pool[idx - 1].get_name() == "CONSTANT_Utf8" :
return self.constant_pool[idx - 1].get_bytes()
return None
def set_string(self, idx, name) :
if self.constant_pool[idx - 1].get_name() == "CONSTANT_Utf8" :
self.constant_pool[idx - 1].set_bytes( name )
else :
bytecode.Exit( "invalid index %d to set string %s" % (idx, name) )
def add_string(self, name) :
name_index = self.get_string_index(name)
if name_index != -1 :
return name_index
tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Utf8" ]
buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, len(name) ) + pack( ">%ss" % len(name), name )
ci = CONSTANT_INFO[ tag_value ][-1]( self, bytecode.BuffHandle( buff ) )
self.constant_pool.append( ci )
self.constant_pool_count.set_value( self.constant_pool_count.get_value() + 1 )
return self.constant_pool_count.get_value() - 1
def set_this_class(self, this_class) :
self.__this_class = this_class
def get_this_class(self) :
return self.__this_class.get_value()
def get_this_class_name(self) :
return self.get_class( self.__this_class.get_value() )[0]
def add_constant_pool(self, elem) :
self.constant_pool.append( elem )
self.constant_pool_count.set_value( self.constant_pool_count.get_value() + 1 )
def get_constant_pool_count(self) :
return self.constant_pool_count.get_value()
def create_class(self, name) :
class_name_index = self.add_string( name )
return self._create_class( class_name_index )
def _create_class(self, class_name_index) :
class_index = self.get_class_by_index( class_name_index )
if class_index == -1 :
new_class = CreateClass( self, class_name_index )
self.add_constant_pool( Class( self, bytecode.BuffHandle( new_class.get_raw() ) ) )
class_index = self.get_constant_pool_count() - 1
return class_index
def create_name_and_type(self, name, desc) :
name_index = self.add_string( name )
descriptor_index = self.add_string( desc )
return self._create_name_and_type( name_index, descriptor_index )
def create_name_and_type_by_index(self, name_method_index, descriptor_method_index) :
return self._create_name_and_type( name_method_index, descriptor_method_index )
def _create_name_and_type(self, name_method_index, descriptor_method_index) :
name_and_type_index = self.get_name_and_type_index( name_method_index, descriptor_method_index )
if name_and_type_index == -1 :
new_nat = CreateNameAndType( self, name_method_index, descriptor_method_index )
self.add_constant_pool( NameAndType( self, bytecode.BuffHandle( new_nat.get_raw() ) ) )
name_and_type_index = self.get_constant_pool_count() - 1
return name_and_type_index
def create_method_ref(self, new_class_index, new_name_and_type_index) :
new_mr_index = self.get_method_ref_index( new_class_index, new_name_and_type_index )
if new_mr_index == -1 :
new_mr = CreateMethodRef( self, new_class_index, new_name_and_type_index )
self.add_constant_pool( MethodRef( self, bytecode.BuffHandle( new_mr.get_raw() ) ) )
new_mr_index = self.get_constant_pool_count() - 1
return new_mr_index
def create_field_ref(self, new_class_index, new_name_and_type_index) :
new_fr_index = self.get_field_ref_index( new_class_index, new_name_and_type_index )
if new_fr_index == -1 :
new_fr = CreateFieldRef( self, new_class_index, new_name_and_type_index )
self.add_constant_pool( FieldRef( self, bytecode.BuffHandle( new_fr.get_raw() ) ) )
new_fr_index = self.get_constant_pool_count() - 1
return new_fr_index
def create_integer(self, value) :
new_int_index = self.get_integer_index( value )
if new_int_index == -1 :
new_int = CreateInteger( value )
self.add_constant_pool( Integer( self, bytecode.BuffHandle( new_int.get_raw() ) ) )
new_int_index = self.get_constant_pool_count() - 1
return new_int_index
def create_string(self, value) :
new_string_index = self.get_cstring_index( value )
if new_string_index == -1 :
new_string = CreateString( self, value )
self.add_constant_pool( String( self, bytecode.BuffHandle( new_string.get_raw() ) ) )
new_string_index = self.get_constant_pool_count() - 1
return new_string_index
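# A minimal usage sketch (illustrative only) : the create_* helpers are idempotent, they return
# the index of an existing constant pool entry when one already matches. The class name, field
# name and descriptor below are assumptions chosen for the example.
#
#   cls_idx = cm.create_class( "java/lang/System" )                 # 'cm' is a ClassManager
#   nat_idx = cm.create_name_and_type( "out", "Ljava/io/PrintStream;" )
#   fr_idx  = cm.create_field_ref( cls_idx, nat_idx )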
class JVMFormat(bytecode._Bytecode) :
"""
An object which is the main class to handle properly a class file.
Exported fields : magic, minor_version, major_version, constant_pool_count, access_flags, this_class, super_class, interfaces_count, fields_count, methods_count, attributes_count
"""
def __init__(self, buff) :
"""
@param buff : the buffer which represents the open file
"""
super(JVMFormat, self).__init__( buff )
self._load_class()
def _load_class(self) :
# u4 magic;
# u2 minor_version;
# u2 major_version;
self.magic = SV( '>L', self.read( 4 ) )
self.minor_version = SV( '>H', self.read( 2 ) )
self.major_version = SV( '>H', self.read( 2 ) )
# u2 constant_pool_count;
self.constant_pool_count = SV( '>H', self.read( 2 ) )
# cp_info constant_pool[constant_pool_count-1];
self.constant_pool = []
self.__CM = ClassManager( self.constant_pool, self.constant_pool_count )
i = 1
while(i < self.constant_pool_count.get_value()) :
tag = SV( '>B', self.read_b( 1 ) )
if tag.get_value() not in CONSTANT_INFO :
bytecode.Exit( "tag %d not in CONSTANT_INFO" % tag.get_value() )
ci = CONSTANT_INFO[ tag.get_value() ][-1]( self.__CM, self )
self.constant_pool.append( ci )
i = i + 1
# CONSTANT_Long or CONSTANT_Double
# If a CONSTANT_Long_info or CONSTANT_Double_info structure is the item
# in the constant_pool table at index n, then the next usable item in the pool is
# located at index n + 2. The constant_pool index n + 1 must be valid but is
# considered unusable.
if tag.get_value() == 5 or tag.get_value() == 6 :
self.constant_pool.append( EmptyConstant() )
i = i + 1
# u2 access_flags;
# u2 this_class;
# u2 super_class;
self.access_flags = SV( '>H', self.read( 2 ) )
self.this_class = SV( '>H', self.read( 2 ) )
self.super_class = SV( '>H', self.read( 2 ) )
self.__CM.set_this_class( self.this_class )
# u2 interfaces_count;
self.interfaces_count = SV( '>H', self.read( 2 ) )
# u2 interfaces[interfaces_count];
self.interfaces = []
for i in range(0, self.interfaces_count.get_value()) :
tag = SV( '>H', self.read( 2 ) )
self.interfaces.append( tag )
# u2 fields_count;
self.fields_count = SV( '>H', self.read( 2 ) )
# field_info fields[fields_count];
self.fields = []
for i in range(0, self.fields_count.get_value()) :
fi = FieldInfo( self.__CM, self )
self.fields.append( fi )
# u2 methods_count;
self.methods_count = SV( '>H', self.read( 2 ) )
# method_info methods[methods_count];
self.methods = []
for i in range(0, self.methods_count.get_value()) :
mi = MethodInfo( self.__CM, self )
self.methods.append( mi )
# u2 attributes_count;
self.attributes_count = SV( '>H', self.read( 2 ) )
# attribute_info attributes[attributes_count];
self.__attributes = []
for i in range(0, self.attributes_count.get_value()) :
ai = AttributeInfo( self.__CM, self )
self.__attributes.append( ai )
def get_class(self, class_name) :
"""
Verify the name of the class
@param class_name : the name of the class
@rtype : True if the class name matches this class, False otherwise
"""
x = self.__CM.get_this_class_name() == class_name
if x == True :
return x
return self.__CM.get_this_class_name() == class_name.replace(".", "/")
def get_classes_names(self) :
"""
Return the names of classes
"""
return [ self.__CM.get_this_class_name() ]
def get_name(self) :
"""
"""
return self.__CM.get_this_class_name()
def get_classes(self) :
"""
"""
return [ self ]
def get_field(self, name) :
"""
Return in a list all fields whose name matches the regexp
@param name : the name of the field (a regexp)
"""
prog = re.compile( name )
fields = []
for i in self.fields :
if prog.match( i.get_name() ) :
fields.append( i )
return fields
def get_method_descriptor(self, class_name, method_name, descriptor) :
"""
Return the specific method
@param class_name : the class name of the method
@param method_name : the name of the method
@param descriptor : the descriptor of the method
@rtype: L{MethodInfo}
"""
# FIXME : handle multiple class name ?
if class_name != None :
if class_name != self.__CM.get_this_class_name() :
return None
for i in self.methods :
if method_name == i.get_name() and descriptor == i.get_descriptor() :
return i
return None
def get_field_descriptor(self, class_name, field_name, descriptor) :
"""
Return the specific field
@param class_name : the class name of the field
@param field_name : the name of the field
@param descriptor : the descriptor of the field
@rtype: L{FieldInfo}
"""
# FIXME : handle multiple class name ?
if class_name != None :
if class_name != self.__CM.get_this_class_name() :
return None
for i in self.fields :
if field_name == i.get_name() and descriptor == i.get_descriptor() :
return i
return None
def get_method(self, name) :
"""Return into a list all methods which corresponds to the regexp
@param name : the name of the method (a regexp)
"""
prog = re.compile( name )
methods = []
for i in self.methods :
if prog.match( i.get_name() ) :
methods.append( i )
return methods
def get_all_fields(self) :
return self.fields
def get_fields(self) :
"""Return all objects fields"""
return self.fields
def get_methods(self) :
"""Return all objects methods"""
return self.methods
def get_constant_pool(self) :
"""Return the constant pool list"""
return self.constant_pool
def get_strings(self) :
"""Return all strings into the class"""
l = []
for i in self.constant_pool :
if i.get_name() == "CONSTANT_Utf8" :
l.append( i.get_bytes() )
return l
def get_class_manager(self) :
"""
Return directly the class manager
@rtype : L{ClassManager}
"""
return self.__CM
def set_used_field(self, old, new) :
"""
Change the description of a field
@param old : a list of strings containing the original class name, field name and descriptor
@param new : a list of strings containing the new class name, field name and descriptor
"""
used_fields = self.__CM.get_used_fields()
for i in used_fields :
class_idx = i.format.get_value().class_index
name_and_type_idx = i.format.get_value().name_and_type_index
class_name = self.__CM.get_string( self.__CM.get_item(class_idx).get_name_index() )
field_name = self.__CM.get_string( self.__CM.get_item(name_and_type_idx).get_name_index() )
descriptor = self.__CM.get_string( self.__CM.get_item(name_and_type_idx).get_descriptor_index() )
if old[0] == class_name and old[1] == field_name and old[2] == descriptor :
# print "SET USED FIELD", class_name, method_name, descriptor
self.__CM.set_string( self.__CM.get_item(class_idx).get_name_index(), new[0] )
self.__CM.set_string( self.__CM.get_item(name_and_type_idx).get_name_index(), new[1] )
self.__CM.set_string( self.__CM.get_item(name_and_type_idx).get_descriptor_index(), new[2] )
def set_used_method(self, old, new) :
"""
Change the description of a method
@param old : a list of strings containing the original class name, method name and descriptor
@param new : a list of strings containing the new class name, method name and descriptor
"""
used_methods = self.__CM.get_used_methods()
for i in used_methods :
class_idx = i.format.get_value().class_index
name_and_type_idx = i.format.get_value().name_and_type_index
class_name = self.__CM.get_string( self.__CM.get_item(class_idx).get_name_index() )
method_name = self.__CM.get_string( self.__CM.get_item(name_and_type_idx).get_name_index() )
descriptor = self.__CM.get_string( self.__CM.get_item(name_and_type_idx).get_descriptor_index() )
if old[0] == class_name and old[1] == method_name and old[2] == descriptor :
# print "SET USED METHOD", class_name, method_name, descriptor
self.__CM.set_string( self.__CM.get_item(class_idx).get_name_index(), new[0] )
self.__CM.set_string( self.__CM.get_item(name_and_type_idx).get_name_index(), new[1] )
self.__CM.set_string( self.__CM.get_item(name_and_type_idx).get_descriptor_index(), new[2] )
def show(self) :
"""
Show the .class file in a human-readable format
"""
bytecode._Print( "MAGIC", self.magic.get_value() )
bytecode._Print( "MINOR VERSION", self.minor_version.get_value() )
bytecode._Print( "MAJOR VERSION", self.major_version.get_value() )
bytecode._Print( "CONSTANT POOL COUNT", self.constant_pool_count.get_value() )
nb = 0
for i in self.constant_pool :
print nb,
i.show()
nb += 1
bytecode._Print( "ACCESS FLAGS", self.access_flags.get_value() )
bytecode._Print( "THIS CLASS", self.this_class.get_value() )
bytecode._Print( "SUPER CLASS", self.super_class.get_value() )
bytecode._Print( "INTERFACE COUNT", self.interfaces_count.get_value() )
nb = 0
for i in self.interfaces :
print nb,
print i
bytecode._Print( "FIELDS COUNT", self.fields_count.get_value() )
nb = 0
for i in self.fields :
print nb,
i.show()
nb += 1
bytecode._Print( "METHODS COUNT", self.methods_count.get_value() )
nb = 0
for i in self.methods :
print nb,
i.show()
nb += 1
bytecode._Print( "ATTRIBUTES COUNT", self.attributes_count.get_value() )
nb = 0
for i in self.__attributes :
print nb,
i.show()
nb += 1
def pretty_show(self, vm_a) :
"""
Show the .class file in a human-readable format
"""
bytecode._Print( "MAGIC", self.magic.get_value() )
bytecode._Print( "MINOR VERSION", self.minor_version.get_value() )
bytecode._Print( "MAJOR VERSION", self.major_version.get_value() )
bytecode._Print( "CONSTANT POOL COUNT", self.constant_pool_count.get_value() )
nb = 0
for i in self.constant_pool :
print nb,
i.show()
nb += 1
bytecode._Print( "ACCESS FLAGS", self.access_flags.get_value() )
bytecode._Print( "THIS CLASS", self.this_class.get_value() )
bytecode._Print( "SUPER CLASS", self.super_class.get_value() )
bytecode._Print( "INTERFACE COUNT", self.interfaces_count.get_value() )
nb = 0
for i in self.interfaces :
print nb,
i.show()
bytecode._Print( "FIELDS COUNT", self.fields_count.get_value() )
nb = 0
for i in self.fields :
print nb,
i.show()
nb += 1
bytecode._Print( "METHODS COUNT", self.methods_count.get_value() )
nb = 0
for i in self.methods :
print nb,
i.pretty_show(vm_a)
nb += 1
bytecode._Print( "ATTRIBUTES COUNT", self.attributes_count.get_value() )
nb = 0
for i in self.__attributes :
print nb,
i.show()
def insert_string(self, value) :
"""Insert a string into the constant pool list (Constant_Utf8)
@param value : the new string
"""
self.__CM.add_string( value )
def insert_field(self, class_name, name, descriptor) :
"""
Insert a field into the class
@param class_name : the class of the field
@param name : the name of the field
@param descriptor : a list with the access_flag and the descriptor ( [ "ACC_PUBLIC", "I" ] )
"""
new_field = CreateFieldInfo( self.__CM, name, descriptor )
new_field = FieldInfo( self.__CM, bytecode.BuffHandle( new_field.get_raw() ) )
self.fields.append( new_field )
self.fields_count.set_value( self.fields_count.get_value() + 1 )
# Add a FieldRef and a NameAndType
name_and_type_index = self.__CM.create_name_and_type_by_index( new_field.get_name_index(), new_field.get_descriptor_index() )
self.__CM.create_field_ref( self.__CM.get_this_class(), name_and_type_index )
def insert_craft_method(self, name, proto, codes) :
"""
Insert a craft method into the class
@param name : the name of the new method
@param proto : a list which describes the method ( [ ACCESS_FLAGS, RETURN_TYPE, ARGUMENTS ], i.e. : [ "ACC_PUBLIC", "[B", "[B" ] )
@param codes : a list which represents the code in a human readable format ( [ [ "aconst_null" ], [ "areturn" ] ] )
"""
# Create new method
new_method = CreateMethodInfo(self.__CM, name, proto, codes)
# Insert the method by casting it directly into a MethodInfo with the raw buffer
self._insert_basic_method( MethodInfo( self.__CM, bytecode.BuffHandle( new_method.get_raw() ) ) )
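# Illustrative call (hypothetical instance name `jvm`; the argument values are
# taken from the docstring above, not part of the original module): craft a
# public method that takes and returns a byte array and simply returns null:
#   jvm.insert_craft_method( "identity",
#                            [ "ACC_PUBLIC", "[B", "[B" ],
#                            [ [ "aconst_null" ], [ "areturn" ] ] )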
def insert_direct_method(self, name, ref_method) :
"""
Insert a direct method (MethodInfo object) into the class
@param name : the name of the new method
@param ref_method : the MethodInfo Object
"""
if ref_method == None :
return
# Change the name_index
name_index = self.__CM.get_string_index( name )
if name_index != -1 :
bytecode.Exit( "method %s already exits" % name )
name_index = self.__CM.add_string( name )
ref_method.set_name_index( name_index )
# Change the descriptor_index
descriptor_index = self.__CM.get_string_index( ref_method.get_descriptor() )
if descriptor_index == -1 :
descriptor_index = self.__CM.add_string( ref_method.get_descriptor() )
ref_method.set_descriptor_index( descriptor_index )
# Change attributes name index
self._fix_attributes_external( ref_method )
# Change internal index
self._fix_attributes_internal( ref_method )
# Insert the method
self._insert_basic_method( ref_method )
def _fix_attributes_external(self, ref_method) :
for i in ref_method.get_attributes() :
attribute_name_index = self.__CM.add_string( i.get_name() )
i.set_attribute_name_index( attribute_name_index )
self._fix_attributes_external( i.get_item() )
def _fix_attributes_internal(self, ref_method) :
for i in ref_method.get_attributes() :
attribute_name_index = self.__CM.add_string( i.get_name() )
i._fix_attributes( self.__CM )
i.set_attribute_name_index( attribute_name_index )
def _insert_basic_method(self, ref_method) :
# Add a MethodRef and a NameAndType
name_and_type_index = self.__CM.create_name_and_type_by_index( ref_method.get_name_index(), ref_method.get_descriptor_index() )
self.__CM.create_method_ref( self.__CM.get_this_class(), name_and_type_index )
# Change the class manager
ref_method.set_cm( self.__CM )
# Insert libraries/constants dependencies
methods = ref_method._patch_bytecodes()
# FIXME : insert needed fields + methods
prog = re.compile( "^java*" )
for i in methods :
if prog.match( i[0] ) == None :
bytecode.Exit( "ooooops" )
#ref_method.show()
# Insert the method
self.methods.append( ref_method )
self.methods_count.set_value( self.methods_count.get_value() + 1 )
def _get_raw(self) :
# u4 magic;
# u2 minor_version;
# u2 major_version;
buff = self.magic.get_value_buff()
buff += self.minor_version.get_value_buff()
buff += self.major_version.get_value_buff()
# u2 constant_pool_count;
buff += self.constant_pool_count.get_value_buff()
# cp_info constant_pool[constant_pool_count-1];
for i in self.constant_pool :
buff += i.get_raw()
# u2 access_flags;
# u2 this_class;
# u2 super_class;
buff += self.access_flags.get_value_buff()
buff += self.this_class.get_value_buff()
buff += self.super_class.get_value_buff()
# u2 interfaces_count;
buff += self.interfaces_count.get_value_buff()
# u2 interfaces[interfaces_count];
for i in self.interfaces :
buff += i.get_value_buff()
# u2 fields_count;
buff += self.fields_count.get_value_buff()
# field_info fields[fields_count];
for i in self.fields :
buff += i.get_raw()
# u2 methods_count;
buff += self.methods_count.get_value_buff()
# method_info methods[methods_count];
for i in self.methods :
buff += i.get_raw()
# u2 attributes_count;
buff += self.attributes_count.get_value_buff()
# attribute_info attributes[attributes_count];
for i in self.__attributes :
buff += i.get_raw()
return buff
def save(self) :
"""
Return the class (with the modifications) in raw format
@rtype: string
"""
return self._get_raw()
def set_vmanalysis(self, vmanalysis) :
pass
def get_generator(self) :
import jvm_generate
return jvm_generate.JVMGenerate
def get_INTEGER_INSTRUCTIONS(self) :
return INTEGER_INSTRUCTIONS
def get_type(self) :
return "JVM"
|
src/genie/libs/parser/iosxe/tests/ShowIpVrfDetail/cli/equal/golden_output_expected.py
|
balmasea/genieparser
| 204 |
140580
|
expected_output = {
"Mgmt-intf": {
"address_family": {
"ipv4 unicast": {
"flags": "0x0",
"table_id": "0x1",
"vrf_label": {"allocation_mode": "per-prefix"},
}
},
"cli_format": "New",
"flags": "0x1808",
"interface": {"GigabitEthernet1": {"vrf": "Mgmt-intf"}},
"interfaces": ["GigabitEthernet1"],
"support_af": "multiple address-families",
"vrf_id": 1,
},
"VRF1": {
"address_family": {
"ipv4 unicast": {
"flags": "0x0",
"table_id": "0x2",
"vrf_label": {
"allocation_mode": "per-prefix",
"distribution_protocol": "LDP",
},
}
},
"cli_format": "New",
"flags": "0x180C",
"interface": {
"GigabitEthernet2.390": {"vrf": "VRF1"},
"GigabitEthernet2.410": {"vrf": "VRF1"},
"GigabitEthernet2.415": {"vrf": "VRF1"},
"GigabitEthernet2.420": {"vrf": "VRF1"},
"GigabitEthernet3.390": {"vrf": "VRF1"},
"GigabitEthernet3.410": {"vrf": "VRF1"},
"GigabitEthernet3.415": {"vrf": "VRF1"},
"GigabitEthernet3.420": {"vrf": "VRF1"},
"Loopback300": {"vrf": "VRF1"},
"Tunnel1": {"vrf": "VRF1"},
"Tunnel3": {"vrf": "VRF1"},
"Tunnel4": {"vrf": "VRF1"},
"Tunnel6": {"vrf": "VRF1"},
"Tunnel8": {"vrf": "VRF1"},
},
"interfaces": [
"Tunnel1",
"Loopback300",
"GigabitEthernet2.390",
"GigabitEthernet2.410",
"GigabitEthernet2.415",
"GigabitEthernet2.420",
"GigabitEthernet3.390",
"GigabitEthernet3.410",
"GigabitEthernet3.415",
"GigabitEthernet3.420",
"Tunnel3",
"Tunnel4",
"Tunnel6",
"Tunnel8",
],
"route_distinguisher": "65000:1",
"support_af": "multiple address-families",
"vrf_id": 2,
},
}
|
test/win/gyptest-link-safeseh.py
|
chlorm-forks/gyp
| 2,151 |
140588
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure safeseh setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp()
CHDIR = 'linker-flags'
test.run_gyp('safeseh.gyp', chdir=CHDIR)
test.build('safeseh.gyp', test.ALL, chdir=CHDIR)
def HasSafeExceptionHandlers(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/LOADCONFIG', full_path)
return ' Safe Exception Handler Table' in output
# From MSDN: http://msdn.microsoft.com/en-us/library/9a89h429.aspx
# If /SAFESEH is not specified, the linker will produce an image with a
# table of safe exception handlers if all modules are compatible with
# the safe exception handling feature. If any modules were not
# compatible with the safe exception handling feature, the resulting image
# will not contain a table of safe exception handlers.
# However, the msvs IDE passes /SAFESEH to the linker by default, if
# ImageHasSafeExceptionHandlers is not set to false in the vcxproj file.
# We emulate this behavior in msvs_emulation.py, so 'test_safeseh_default'
# and 'test_safeseh_yes' are built identically.
if not HasSafeExceptionHandlers('test_safeseh_default.exe'):
test.fail_test()
if HasSafeExceptionHandlers('test_safeseh_no.exe'):
test.fail_test()
if not HasSafeExceptionHandlers('test_safeseh_yes.exe'):
test.fail_test()
if HasSafeExceptionHandlers('test_safeseh_x64.exe'):
test.fail_test()
test.pass_test()
|
Lib/objc/_MIME.py
|
snazari/Pyto
| 701 |
140591
|
"""
Classes from the 'MIME' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
MFWeakProxy = _Class("MFWeakProxy")
MFWeakReferenceHolder = _Class("MFWeakReferenceHolder")
MFArrayDiff = _Class("MFArrayDiff")
MFMutableMessageHeadersFactory = _Class("MFMutableMessageHeadersFactory")
MFTypeInfo = _Class("MFTypeInfo")
MFMessageTextAttachment = _Class("MFMessageTextAttachment")
MFMimeTextAttachment = _Class("MFMimeTextAttachment")
MFPartialNetworkDataConsumer = _Class("MFPartialNetworkDataConsumer")
MFMimePart = _Class("MFMimePart")
MFMimeCharset = _Class("MFMimeCharset")
MFMessageStoreObjectCache = _Class("MFMessageStoreObjectCache")
MFHTMLParser = _Class("MFHTMLParser")
MFDiagnostics = _Class("MFDiagnostics")
MFZeroCopyDataConsumer = _Class("MFZeroCopyDataConsumer")
MFDataHolder = _Class("MFDataHolder")
MFBlockDataConsumer = _Class("MFBlockDataConsumer")
MFNullDataConsumer = _Class("MFNullDataConsumer")
MFCountingDataConsumer = _Class("MFCountingDataConsumer")
MFBufferedDataConsumer = _Class("MFBufferedDataConsumer")
MFMessageDataContainer = _Class("MFMessageDataContainer")
MFMessageStore = _Class("MFMessageStore")
MFDataMessageStore = _Class("MFDataMessageStore")
MFMessageHeaders = _Class("MFMessageHeaders")
MFMutableMessageHeaders = _Class("MFMutableMessageHeaders")
MFMessageFileWrapper = _Class("MFMessageFileWrapper")
MFPlaceholderFileWrapper = _Class("MFPlaceholderFileWrapper")
MFMessageBody = _Class("MFMessageBody")
MFMimeBody = _Class("MFMimeBody")
MFMessage = _Class("MFMessage")
_MFEmailSetEmail = _Class("_MFEmailSetEmail")
MFBaseFilterDataConsumer = _Class("MFBaseFilterDataConsumer")
MFUUDecoder = _Class("MFUUDecoder")
MFQuotedPrintableDecoder = _Class("MFQuotedPrintableDecoder")
MFQuotedPrintableEncoder = _Class("MFQuotedPrintableEncoder")
MFProgressFilterDataConsumer = _Class("MFProgressFilterDataConsumer")
MFRangedDataFilter = _Class("MFRangedDataFilter")
MFLineEndingConverterFilter = _Class("MFLineEndingConverterFilter")
MFMutableFilterDataConsumer = _Class("MFMutableFilterDataConsumer")
MFBase64Decoder = _Class("MFBase64Decoder")
MFBase64Encoder = _Class("MFBase64Encoder")
MFConditionLock = _Class("MFConditionLock")
MFRecursiveLock = _Class("MFRecursiveLock")
MFLock = _Class("MFLock")
_MFEmailSetEnumerator = _Class("_MFEmailSetEnumerator")
MFData = _Class("MFData")
MFMutableData = _Class("MFMutableData")
MFWeakSet = _Class("MFWeakSet")
MFEmailSet = _Class("MFEmailSet")
|
ck/repo/module/index/module.py
|
santosh653/ck
| 480 |
140594
|
#
# Collective Knowledge (indexing through ElasticSearch)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: <NAME>
#
cfg = {} # Will be updated by CK (meta description of this module)
work = {} # Will be updated by CK (temporal data)
ck = None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return': 0}
##############################################################################
# turn indexing on
def on(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o = i.get('out', '')
i['status'] = 'yes'
r = status(i)
if r['return'] > 0:
return r
if o == 'con':
ck.out('Indexing is on')
return {'return': 0}
##############################################################################
# turn indexing off
def off(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o = i.get('out', '')
i['status'] = 'no'
r = status(i)
if r['return'] > 0:
return r
if o == 'con':
ck.out('Indexing is off')
return {'return': 0}
##############################################################################
# show indexing status
def show(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o = i.get('out', '')
i['status'] = ''
r = status(i)
if r['return'] > 0:
return r
s = r['status']
if s == 'yes':
sx = 'on'
else:
sx = 'off'
if o == 'con':
ck.out('Indexing status: '+sx)
return {'return': 0}
##############################################################################
# check indexing status
def status(i):
"""
Input: {
status - if 'yes', turn it on
if 'no', turn it off
if '', return status
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
status
}
"""
# Get current configuration
cfg = {}
r = ck.access({'action': 'load',
'repo_uoa': ck.cfg['repo_name_default'],
'module_uoa': ck.cfg['subdir_kernel'],
'data_uoa': ck.cfg['subdir_kernel_default']})
if r['return'] == 0:
cfg.update(r['dict'])
r = ck.access({'action': 'load',
'repo_uoa': ck.cfg['repo_name_local'],
'module_uoa': ck.cfg['subdir_kernel'],
'data_uoa': ck.cfg['subdir_kernel_default']})
if r['return'] > 0:
if r['return'] != 16:
return r
ck.out('')
ck.out('We strongly suggest that you set up a local repository first!')
return {'return': 0}
cfg.update(r['dict'])
# Turn on indexing
st = i.get('status', '')
s = cfg.get('use_indexing', ck.cfg.get('use_indexing', ''))
if st != '':
cfg['use_indexing'] = st
s = st
r = ck.access({'action': 'update',
'repo_uoa': ck.cfg['repo_name_local'],
'module_uoa': ck.cfg['subdir_kernel'],
'data_uoa': ck.cfg['subdir_kernel_default'],
'dict': cfg,
'substitute': 'yes',
'ignore_update': 'yes',
'skip_indexing': 'yes'})
if r['return'] > 0:
return r
return {'return': 0, 'status': s}
##############################################################################
# test the connection to the indexing server
def test(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o = i.get('out', '')
r = ck.access_index_server({'request': 'TEST', 'dict': {}})
if r['return'] > 0:
return r
dd = r['dict']
status = dd.get('status', 0)
if status != 200:
return {'return': 1, 'error': 'returned status is not 200'}
version = dd.get('version', {}).get('number', '')
if o == 'con':
ck.out('Indexing server is working (version = '+version+')')
return r
##############################################################################
# clean whole index
def clean(i):
"""
Input: {
(force) - if 'yes', force cleaning
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o = i.get('out', '')
to_delete = True
if o == 'con' and i.get('force', '') != 'yes':
r = ck.inp({'text': 'Are you sure you want to clean the whole index (y/N): '})
c = r['string'].lower()
if c != 'y' and c != 'yes':
to_delete = False
if to_delete:
r = ck.access_index_server(
{'request': 'DELETE', 'path': '/_all', 'dict': {}})
if r['return'] > 0:
return r
dd = r['dict']
status = dd.get('status', 0)
err = dd.get('error', '')
if err != '':
r = {'return': 1, 'error': err}
return r
|
tools/crunchstat-summary/crunchstat_summary/webchart.py
|
rpatil524/arvados
| 222 |
140596
|
<gh_stars>100-1000
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
try:
from html import escape
except ImportError:
from cgi import escape
import json
import pkg_resources
class WebChart(object):
"""Base class for a web chart.
Subclasses must assign JSLIB and JSASSETS, and override the
chartdata() method.
"""
JSLIB = None
JSASSETS = None
def __init__(self, label, summarizers):
self.label = label
self.summarizers = summarizers
def html(self):
return '''<!doctype html><html><head>
<title>{} stats</title>
<script type="text/javascript" src="{}"></script>
<script type="text/javascript">{}</script>
{}
</head><body></body></html>
'''.format(escape(self.label),
self.JSLIB, self.js(), self.headHTML())
def js(self):
return 'var chartdata = {};\n{}'.format(
json.dumps(self.sections()),
'\n'.join([pkg_resources.resource_string('crunchstat_summary', jsa).decode('utf-8') for jsa in self.JSASSETS]))
def sections(self):
return [
{
'label': s.long_label(),
'charts': [
self.chartdata(s.label, s.tasks, stat)
for stat in (('cpu', ['user+sys__rate', 'user__rate', 'sys__rate']),
('mem', ['rss']),
('net:eth0', ['tx+rx__rate','rx__rate','tx__rate']),
('net:keep0', ['tx+rx__rate','rx__rate','tx__rate']),
('statfs', ['used', 'total']),
)
],
}
for s in self.summarizers]
def chartdata(self, label, tasks, stat):
"""Return chart data for the given tasks.
The returned value will be available on the client side as an
element of the "chartdata" array.
"""
raise NotImplementedError()
def headHTML(self):
"""Return extra HTML text to include in HEAD."""
return ''
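# --- Illustrative subclass sketch (hypothetical names and asset paths; not
# part of crunchstat-summary) ---
# class ExampleChart(WebChart):
#     JSLIB = 'https://example.invalid/chartlib.min.js'    # placeholder URL
#     JSASSETS = ['chart_helpers.js']                       # bundled JS resource names
#
#     def chartdata(self, label, tasks, stat):
#         # shape of the returned dict is whatever the client-side JS expects
#         return {'label': label, 'stat': stat, 'series': []}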
|
cogan/data/compute_optimal_alignment.py
|
sagardsaxena/CoGAN
| 285 |
140626
|
#!/usr/bin/env python
'''
/* Copyright 2016, <NAME>
All Rights Reserved
Permission to use, copy, modify, and distribute this software and
its documentation for any non-commercial purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that copyright notice and this permission
notice appear in supporting documentation, and that the name of
the author not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.
THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
ANY PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
'''
import numpy as np
import cv2
from sklearn.metrics import mean_squared_error
imgU = cv2.imread("usps_mean.png",0)
imgM = cv2.imread("mnist_mean.png",0)
for p in range(0,10):
newU = np.zeros(shape=(16+2*p,16+2*p),dtype=np.float32)
h = newU.shape[0]
newU[p:h-p,p:h-p] = np.float32(imgU)
resizedU = cv2.resize(newU,(28,28))
resizedU - np.float32(imgM)
error = mean_squared_error(resizedU, np.float32(imgM))
print "%d: %f" %(p,error)
# The optimal padding size is 3 on each side
|
chainer_chemistry/models/gwm/gwm.py
|
pfnet/chainerchem
| 184 |
140637
|
<gh_stars>100-1000
import chainer
from chainer import functions
from chainer import links
from chainer_chemistry.links import GraphLinear
class WarpGateUnit(chainer.Chain):
"""WarpGateUnit
It computes a gated sum that mixes the normal node feature `h` and the
super node feature `g` into a `merged` feature.
See Section "3.4 Warp Gate" of the paper.
Args:
output_type (str): supported types are below.
graph: node-level output, computed with `GraphLinear` layers.
super: super-node output, computed with `links.Linear` layers.
hidden_dim (int): hidden dim
dropout_ratio (float): negative value indicates to not apply dropout.
activation (callable):
"""
def __init__(self, output_type='graph', hidden_dim=16,
dropout_ratio=-1, activation=functions.sigmoid):
super(WarpGateUnit, self).__init__()
if output_type == 'graph':
LinearLink = GraphLinear
elif output_type == 'super':
LinearLink = links.Linear
else:
raise ValueError(
'output_type = {} is unexpected. graph or super is supported.'
.format(output_type))
with self.init_scope():
self.H = LinearLink(in_size=hidden_dim, out_size=hidden_dim)
self.G = LinearLink(in_size=hidden_dim, out_size=hidden_dim)
self.hidden_dim = hidden_dim
self.dropout_ratio = dropout_ratio
self.output_type = output_type
self.activation = activation
def __call__(self, h, g):
# TODO(nakago): more efficient computation. Maybe we can calculate
# self.G(g) as Linear layer followed by broadcast to each atom.
z = self.H(h) + self.G(g)
if self.dropout_ratio > 0.0:
z = functions.dropout(z, ratio=self.dropout_ratio)
z = self.activation(z)
merged = (1 - z) * h + z * g
return merged
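# Illustrative note (added commentary, not part of the original module): the
# gate above computes z = activation(H(h) + G(g)) (sigmoid by default) and
# returns (1 - z) * h + z * g, i.e. an element-wise learned convex mix of the
# local feature h and the (broadcast) super-node feature g.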
class SuperNodeTransmitterUnit(chainer.Chain):
"""SuperNodeTransmitterUnit
It calculates message from super node to normal node.
Args:
hidden_dim_super (int):
hidden_dim (int): hidden dim of the local node features
dropout_ratio (float): negative value indicates to not apply dropout.
"""
def __init__(self, hidden_dim_super=16, hidden_dim=16, dropout_ratio=-1):
super(SuperNodeTransmitterUnit, self).__init__()
with self.init_scope():
self.F_super = links.Linear(in_size=hidden_dim_super,
out_size=hidden_dim)
self.hidden_dim = hidden_dim
self.hidden_dim_super = hidden_dim_super
self.dropout_ratio = dropout_ratio
def __call__(self, g, n_nodes):
"""main calculation
Args:
g: super node feature. shape (bs, hidden_dim_super)
n_nodes (int): number of nodes
Returns:
g_trans: super --> original transmission
"""
mb = len(g)
# for local updates
g_trans = self.F_super(g)
# intermediate_h_super.shape == (mb, self.hidden_dim)
g_trans = functions.tanh(g_trans)
# intermediate_h_super.shape == (mb, 1, self.hidden_dim)
g_trans = functions.expand_dims(g_trans, 1)
# intermediate_h_super.shape == (mb, atom, self.hidden_dim)
g_trans = functions.broadcast_to(g_trans,
(mb, n_nodes, self.hidden_dim))
return g_trans
class GraphTransmitterUnit(chainer.Chain):
"""GraphTransmitterUnit
It calculates message from normal node to super node.
Args:
hidden_dim_super (int):
hidden_dim (int):
n_heads (int):
dropout_ratio (float):
activation (callable):
"""
def __init__(self, hidden_dim_super=16, hidden_dim=16, n_heads=8,
dropout_ratio=-1, activation=functions.tanh):
super(GraphTransmitterUnit, self).__init__()
hdim_n = hidden_dim * n_heads
with self.init_scope():
self.V_super = GraphLinear(hidden_dim, hdim_n)
self.W_super = links.Linear(hdim_n, hidden_dim_super)
self.B = GraphLinear(hidden_dim, n_heads * hidden_dim_super)
self.hidden_dim = hidden_dim
self.hidden_dim_super = hidden_dim_super
self.dropout_ratio = dropout_ratio
self.n_heads = n_heads
self.activation = activation
def __call__(self, h, g, step=0):
mb, atom, ch = h.shape
h_j = self.V_super(h)
h_j = functions.reshape(h_j, (mb, atom, self.n_heads, ch))
# h_j (mb, atom, self.n_heads, ch)
h_j = functions.transpose(h_j, (0, 2, 1, 3))
# expand h_super
# g_extend.shape (mb, 1, self.hidden_dim_super)
g_extend = functions.expand_dims(g, 1)
# g_extend.shape == (mb, self.n_heads, self.hidden_dim_super)
g_extend = functions.broadcast_to(g_extend, (mb, self.n_heads,
self.hidden_dim_super))
# g_extend.shape == (mb, self.n_heads, 1, self.hidden_dim_super)
g_extend = functions.expand_dims(g_extend, 2)
# update for attention-message B h_i
# h (mb, atom, ch)
# Bh_i.shape == (mb, atom, self.n_heads * self.hidden_dim_super)
Bh_i = self.B(h)
# Bh_i.shape == (mb, atom, num_head, ch)
Bh_i = functions.reshape(Bh_i, (mb, atom, self.n_heads,
self.hidden_dim_super))
# Bh_i.shape == (mb, num_head, atom, ch)
Bh_i = functions.transpose(Bh_i, [0, 2, 1, 3])
# take g^{T} * B * h_i
# indexed by i
# mb, self.n_heads, atom(i)
# b_hi.shape == (mb, self.n_heads, 1, atom)
# This will reduce the last hidden_dim_super axis
b_hi = functions.matmul(g_extend, Bh_i, transb=True)
# softmax. sum/normalize over the last axis.
# mb, self.n_heads, atom(i-normalized)
# attention_i.shape == (mb, self.n_heads, 1, atom)
attention_i = functions.softmax(b_hi, axis=3)
if self.dropout_ratio > 0.0:
attention_i = functions.dropout(attention_i,
ratio=self.dropout_ratio)
# element-wise product --> sum over i
# mb, num_head, hidden_dim_super
# attention_sum.shape == (mb, self.n_heads, 1, ch)
attention_sum = functions.matmul(attention_i, h_j)
# attention_sum.shape == (mb, self.n_heads * ch)
attention_sum = functions.reshape(attention_sum,
(mb, self.n_heads * ch))
# weighting h for different heads
# intermediate_h.shape == (mb, self.n_heads * ch)
# compress heads
h_trans = self.W_super(attention_sum)
# intermediate_h.shape == (mb, self.hidden_dim_super)
h_trans = self.activation(h_trans)
return h_trans
class GWM(chainer.Chain):
"""Graph Warping Module (GWM)
Module for a single layer update.
See: Ishiguro, Maeda, and Koyama. "Graph Warp Module: an Auxiliary Module
for Boosting the Power of Graph NeuralNetworks", arXiv, 2019.
Args:
hidden_dim (int): dimension of hidden vectors
associated to each atom (local node)
hidden_dim_super (int): dimension of super-node hidden vector
n_layers (int): number of layers
n_heads (int): number of heads
dropout_ratio (float): dropout ratio.
Negative value indicates to not apply dropout.
tying_flag (bool): enable if you want to share params across layers.
activation (callable):
wgu_activation (callable):
gtu_activation (callable):
"""
def __init__(self, hidden_dim=16, hidden_dim_super=16, n_layers=4,
n_heads=8, dropout_ratio=-1,
tying_flag=False, activation=functions.relu,
wgu_activation=functions.sigmoid,
gtu_activation=functions.tanh):
super(GWM, self).__init__()
n_use_layers = 1 if tying_flag else n_layers
with self.init_scope():
self.update_super = chainer.ChainList(
*[links.Linear(in_size=hidden_dim_super,
out_size=hidden_dim_super)
for _ in range(n_use_layers)]
)
# for Transmitter unit
self.super_transmitter = chainer.ChainList(
*[SuperNodeTransmitterUnit(
hidden_dim=hidden_dim, hidden_dim_super=hidden_dim_super,
dropout_ratio=dropout_ratio) for _ in range(n_use_layers)])
self.graph_transmitter = chainer.ChainList(
*[GraphTransmitterUnit(
hidden_dim=hidden_dim, hidden_dim_super=hidden_dim_super,
n_heads=n_heads, dropout_ratio=dropout_ratio,
activation=gtu_activation) for _ in range(n_use_layers)])
# for Warp Gate unit
self.wgu_local = chainer.ChainList(
*[WarpGateUnit(
output_type='graph', hidden_dim=hidden_dim,
dropout_ratio=dropout_ratio, activation=wgu_activation)
for _ in range(n_use_layers)])
self.wgu_super = chainer.ChainList(
*[WarpGateUnit(
output_type='super', hidden_dim=hidden_dim_super,
dropout_ratio=dropout_ratio, activation=wgu_activation)
for _ in range(n_use_layers)])
# Weight tying: not layer-wise but recurrent through layers
self.GRU_local = links.GRU(in_size=hidden_dim, out_size=hidden_dim)
self.GRU_super = links.GRU(in_size=hidden_dim_super,
out_size=hidden_dim_super)
# end init_scope-with
self.hidden_dim = hidden_dim
self.hidden_dim_super = hidden_dim_super
self.n_layers = n_layers
self.n_heads = n_heads
self.dropout_ratio = dropout_ratio
self.tying_flag = tying_flag
self.activation = activation
self.wgu_activation = wgu_activation
def __call__(self, h, h_new, g, step=0):
"""main calculation
Note: Do not forget to reset GRU for each batch.
Args:
h: Minibatch by num_nodes by hidden_dim numpy array.
current local node hidden states as input of the vanilla GNN
h_new: Minibatch by num_nodes by hidden_dim numpy array.
updated local node hidden states as output from the vanilla GNN
g: Minibatch by hidden_dim_super numpy array.
current super node hidden state
step: (int) layer index, used to pick the per-layer links
when weights are not tied across layers
Returns: Updated h and g
"""
# (minibatch, atom, ch)
mb, n_nodes, ch = h.shape
# non linear update of the super node
g_new = self.activation(self.update_super[step](g))
# Transmitter unit: inter-module message passing
# original --> super transmission
h_trans = self.graph_transmitter[step](h, g)
# g_trans: super --> original transmission
g_trans = self.super_transmitter[step](g, n_nodes)
# Warp Gate unit
merged_h = self.wgu_local[step](h_new, g_trans)
merged_g = self.wgu_super[step](h_trans, g_new)
# Self recurrent
out_h = functions.reshape(merged_h, (mb * n_nodes, self.hidden_dim))
out_h = self.GRU_local(out_h)
out_h = functions.reshape(out_h, (mb, n_nodes, self.hidden_dim))
out_g = self.GRU_super(merged_g)
return out_h, out_g
def reset_state(self):
self.GRU_local.reset_state()
self.GRU_super.reset_state()
|
local_flow/rec/src/custom_decorators.py
|
JSpenced/you-dont-need-a-bigger-boat
| 356 |
140665
|
<gh_stars>100-1000
"""
Custom decorators for use with Metaflow
"""
from functools import wraps
def pip(libraries):
def decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
import subprocess
import sys
for library, version in libraries.items():
print('Pip Install:', library, version)
if version != '':
subprocess.run([sys.executable, '-m', 'pip',
'install', library + '==' + version])
else:
subprocess.run(
[sys.executable, '-m', 'pip', 'install', library])
return function(*args, **kwargs)
return wrapper
return decorator
def enable_decorator(dec, flag):
try:
flag = bool(int(flag))
except Exception as e:
flag = False
print(e)
def decorator(func):
if flag:
return dec(func)
return func
return decorator
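# --- Illustrative usage sketch (hypothetical Metaflow step; the library
# versions and the EN_BATCH flag are assumptions, not part of this module) ---
# from metaflow import FlowSpec, step, batch
# import os
#
# class TrainFlow(FlowSpec):
#
#     @enable_decorator(batch(cpu=2), flag=os.getenv('EN_BATCH', '0'))
#     @pip(libraries={'pandas': '1.3.5', 'scikit-learn': ''})
#     @step
#     def train(self):
#         import pandas            # installed at runtime by the @pip wrapper
#         self.next(self.end)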
|
encoders/audio/MFCCTimbreEncoder/__init__.py
|
sidphbot/jina-hub
| 106 |
140668
|
<filename>encoders/audio/MFCCTimbreEncoder/__init__.py
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
from jina.executors.decorators import batching, as_ndarray
from jina.executors.encoders import BaseAudioEncoder
class MFCCTimbreEncoder(BaseAudioEncoder):
"""
Extract a `n_mfcc`-dimensional feature vector for each MFCC frame.
:class:`MFCCTimbreEncoder` is based on Mel-Frequency Cepstral
Coefficients (MFCCs) which represent timbral features.
:class:`MFCCTimbreEncoder` encodes an audio signal from a
`Batch x Signal Length` ndarray into a
`Batch x Concatenated Features` ndarray.
:param input_sample_rate: input sampling rate in Hz
(22050 by default)
:param n_mfcc: the number of coefficients
(20 by default)
:param n_fft: length of the FFT window
(2048 by default)
:param hop_length: the number of samples between
successive MFCC frames (512 by default)
"""
def __init__(self, input_sample_rate: int = 22050, n_mfcc: int = 20, n_fft_length: int = 2048,
hop_length: int = 512, *args, **kwargs):
"""Set Constructor."""
super().__init__(*args, **kwargs)
self.input_sample_rate = input_sample_rate
self.n_mfcc = n_mfcc
self.n_fft_length = n_fft_length
self.hop_length = hop_length
@batching
@as_ndarray
def encode(self, content: np.ndarray, *args, **kwargs) -> np.ndarray:
"""
Split the audio signal of each Document into short frames.
Extract MFCCs for each frame and concatenate the frame MFCCs
into a single Document embedding.
:param content: a `Batch x Signal Length` ndarray,
where `Signal Length` is a number of samples
:return: a `Batch x Concatenated Features` ndarray,
where `Concatenated Features` is an `n_mfcc`-dimensional
feature vector times the number of the MFCC frames
"""
from librosa.feature import mfcc
embeds = []
for chunk_data in content:
mfccs = mfcc(y=chunk_data, sr=self.input_sample_rate, n_mfcc=self.n_mfcc, n_fft=self.n_fft_length,
hop_length=self.hop_length)
embeds.append(mfccs.flatten())
return embeds
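# --- Illustrative usage sketch (hypothetical values; not part of this
# executor) ---
# import numpy as np
# encoder = MFCCTimbreEncoder(input_sample_rate=22050, n_mfcc=20)
# signals = np.random.randn(2, 22050).astype('float32')   # two 1-second clips
# embeddings = encoder.encode(signals)
# # each row concatenates n_mfcc coefficients for every MFCC frame of a clip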
|
utils/onnx_util.py
|
xuguozhi/Peppa-Facial-Landmark-PyTorch
| 163 |
140721
|
import torch
import torch.onnx
from models.slim import Slim
x = torch.randn(1, 3, 160, 160)
model = Slim()
model.load_state_dict(torch.load("../pretrained_weights/slim_160_latest.pth", map_location="cpu"))
model.eval()
torch.onnx.export(model, x, "../pretrained_weights/slim_160_latest.onnx", input_names=["input1"], output_names=['output1'])
|
proj/hps_accel/gateware/gen2/test_utils.py
|
keadwen/CFU-Playground
| 240 |
140728
|
#!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for uitls.py"""
from nmigen import Signal, Module
from nmigen_cfu import TestBase
from .utils import delay
import random
class DelayTest(TestBase):
"""Tests the delay function."""
def create_dut(self):
module = Module()
self.in_ = Signal(8)
self.outs_ = delay(module, self.in_, 3)
return module
def test_it(self):
# data with 3 zeros at end, since we are delaying by 3
data = [random.randrange(256) for _ in range(20)] + [0] * 3
def process():
for i in range(len(data)):
yield self.in_.eq(data[i])
yield
for j in range(3):
self.assertEqual((yield self.outs_[j]), data[i - j])
self.run_sim(process, False)
|
examples/computer_vision/efficientdet_pytorch/efficientdet_files/modelema.py
|
gh-determined-ai/determined
| 1,729 |
140733
|
'''
This is based on pytorch-image-models' ModelEMA
https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/utils/model_ema.py
This altered version refactors the load and save functionality to support Determined's fault tolerance features.
'''
from typing import Any, Dict, Sequence, Tuple, Union, cast
from collections import OrderedDict
import torch
from copy import deepcopy
from determined.pytorch import PyTorchCallback
class ModelEma:
""" Model Exponential Moving Average
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
"""
def __init__(self, model, decay=0.9999, context='', resume=''):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.context = context
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def callback_object(self):
class Emacallback(PyTorchCallback):
def state_dict(this) -> Dict[str, Any]:
return {'model': self.ema.state_dict()}
def load_state_dict(this, state_dict: Dict[str, Any]) -> None:
self.ema.load_state_dict(state_dict['model'])
return Emacallback()
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
def update(self, model):
# correct a mismatch in state dict keys
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = model.state_dict()
for k, ema_v in self.ema.state_dict().items():
if needs_module:
k = 'module.' + k
model_v = msd[k].detach()
model_v = self.context.to_device(model_v)
ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
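# Illustrative note (added commentary, not part of the original class): the
# copy_ above applies the standard exponential moving average
#     ema_v <- decay * ema_v + (1 - decay) * model_v
# e.g. with decay = 0.999 an EMA weight of 1.0 and a model weight of 0.0
# become 0.999 after one update, so roughly the last 1 / (1 - decay) = 1000
# updates dominate the average.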
|
src/observer/python/test.py
|
oxnz/design-patterns
| 117 |
140769
|
#!/usr/bin/python
#coding: utf-8
class Subject(object):
def __init__(self):
self._observers = []
def attach(self, observer):
if not observer in self._observers:
self._observers.append(observer)
def detach(self, observer):
try:
self._observers.remove(observer)
except ValueError:
pass
def notify(self, modifier=None):
for observer in self._observers:
if modifier != observer:
observer.update(self)
# Example usage
class DataSubject(Subject):
def __init__(self, name=""):
super(DataSubject, self).__init__()
self.name = name
self._data = 0
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
self.notify()
class Observer:
def __init__(self):
pass
def update(self, subject):
pass
class DataObserver(Observer):
def update(self, subject):
print ("DataSubject: %s has data %d") % (subject.name, subject.data)
def test():
d1 = DataSubject("DataSubject 1")
d2 = DataSubject("DataSubject 2")
ob1 = DataObserver()
ob2 = DataObserver()
d1.attach(ob1)
d1.attach(ob2)
d2.attach(ob1)
d2.attach(ob2)
print ("setting DataSubject 1 to 10")
print d1.data
d1.data = 10
print ("setting DataSubject 2 to 14")
d2.data = 14
print ("data 1 detach ob2")
d1.detach(ob2)
print ("setting DataSubject 1 to 20")
d1.data = 20
print ("data 1 detach ob1")
d1.detach(ob1)
print ("setting DataSubject 1 to 30")
d1.data = 30
if __name__ == '__main__':
test()
|
tests/unit/display/test_traditional.py
|
senstb/aws-elastic-beanstalk-cli
| 110 |
140784
|
<gh_stars>100-1000
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
from dateutil import tz
import mock
import unittest
from ebcli.display import traditional
from ebcli.objects import environment
class TestTraditionalHealthDataPoller(unittest.TestCase):
def test_get_instance_states__no_load_balancer(self):
self.assertEqual(
[],
traditional.TraditionalHealthDataPoller('fake app name', 'fake env name').get_instance_states(None)
)
def test_get_instance_states(self):
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
poller.get_load_balancer_instance_states = mock.MagicMock(
return_value=[
{
'Description': '',
'InstanceId': 'i-0cad09d6183cb22fb',
'Reason': '',
'State': 'healthy'
},
{
'Description': '',
'InstanceId': 'i-0f5678192487123ab',
'Reason': '',
'State': 'healthy'
}
]
)
load_balancers = [{'Name': 'awseb-e-a-AWSEBLoa-1WOG31HKVP6LS'}]
self.assertEqual(
[
{
'Description': '',
'InstanceId': 'i-0cad09d6183cb22fb',
'Reason': '',
'State': 'healthy'
},
{
'Description': '',
'InstanceId': 'i-0f5678192487123ab',
'Reason': '',
'State': 'healthy'
}
],
poller.get_instance_states(load_balancers)
)
@mock.patch('ebcli.lib.elb.get_health_of_instances')
def test_get_load_balancer_instance_states__using_elb_name(
self,
get_health_of_instances_mock
):
get_health_of_instances_mock.return_value = [
{
'InstanceId': 'i-077ad825504695eb9',
'State': 'InService',
'ReasonCode': 'N/A',
'Description': 'N/A'
},
{
'InstanceId': 'i-0965954076351e6e0',
'State': 'InService',
'ReasonCode': 'N/A',
'Description': 'N/A'
},
{
'InstanceId': 'i-0aa042833bfdec77d',
'State': 'InService',
'ReasonCode': 'N/A',
'Description': 'N/A'
}
]
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
self.assertEqual(
get_health_of_instances_mock.return_value,
poller.get_load_balancer_instance_states('awseb-e-a-AWSEBLoa-1WOG31HKVP6LS')
)
@mock.patch('ebcli.lib.elbv2.get_target_groups_for_load_balancer')
@mock.patch('ebcli.lib.elbv2.get_instance_healths_from_target_groups')
def test_get_load_balancer_instance_states__using_elbv2_arn(
self,
get_instance_healths_from_target_groups_mock,
get_target_groups_for_load_balancer_mock
):
get_target_groups_for_load_balancer_mock.return_value = [
{
'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-west-2:123123123123:targetgroup/awseb-AWSEB-Z40E0JSOX7VX/132a50d3c6332139',
'TargetGroupName': 'awseb-AWSEB-Z40E0JSOX7VX',
'Protocol': 'HTTP',
'Port': 80,
'VpcId': 'vpc-0b94a86c',
'HealthCheckProtocol': 'HTTP',
'HealthCheckPort': 'traffic-port',
'HealthCheckIntervalSeconds': 15,
'HealthCheckTimeoutSeconds': 5,
'HealthyThresholdCount': 3,
'UnhealthyThresholdCount': 5,
'HealthCheckPath': '/',
'Matcher': {
'HttpCode': '200'
},
'LoadBalancerArns': [
'arn:aws:elasticloadbalancing:us-west-2:123123123123:loadbalancer/app/awseb-AWSEB-13USMLK35OCE0/e8b5c23789b536c6'
]
}
]
get_instance_healths_from_target_groups_mock.return_value = [
{
'Description': '',
'InstanceId': 'i-0cad09d6183cb22fb',
'Reason': '',
'State': 'healthy'
}
]
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
self.assertEqual(
get_instance_healths_from_target_groups_mock.return_value,
poller.get_load_balancer_instance_states(
'arn:aws:elasticloadbalancing:us-west-2:123123123123:loadbalancer/app/awseb-AWSEB-13USMLK35OCE0/e8b5c23789b536c6'
)
)
@mock.patch('ebcli.lib.ec2.describe_instance')
def test_get_instance_health(
self,
curtailed_describe_instance_response_mock
):
curtailed_describe_instance_response_mock.side_effect = [
{'InstanceId': 'i-0cad09d6183cb22fb', 'State': {'Code': 16, 'Name': 'running'}},
{'InstanceId': 'i-0f5678192487123ab', 'State': {'Code': 16, 'Name': 'running'}},
]
instance_states = [
{
'Description': '',
'InstanceId': 'i-0cad09d6183cb22fb',
'Reason': '',
'State': 'healthy'
},
{
'Description': '',
'InstanceId': 'i-0f5678192487123ab',
'Reason': '',
'State': 'healthy'
}
]
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
self.assertEqual(
[
{
'description': '',
'health': 'running',
'id': 'i-0cad09d6183cb22fb',
'state': 'healthy'
},
{
'description': '',
'health': 'running',
'id': 'i-0f5678192487123ab',
'state': 'healthy'
}
],
poller.get_instance_health(instance_states)
)
@mock.patch('ebcli.lib.ec2.describe_instance')
def test_get_health_information_of_instance_not_associated_with_elb__only_adds_those_instances_that_are_not_already_associated_with_the_environments_load_balancer(
self,
curtailed_describe_instance_response_mock
):
curtailed_describe_instance_response_mock.side_effect = [
{'InstanceId': 'i-0f5678192487123ab', 'State': {'Code': 16, 'Name': 'terminated'}},
{'InstanceId': 'i-0bfd123124124124d', 'State': {'Code': 16, 'Name': 'terminated'}}
]
ids_of_all_instances = ['i-0cad09d6183cb22fb', 'i-0f5678192487123ab', 'i-0bfd123124124124d']
instances_registered_with_elb = [
{
'Description': '',
'InstanceId': 'i-0cad09d6183cb22fb',
'Reason': '',
'State': 'running'
},
]
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
expected_instances = [
{
'description': 'N/A (Not registered with Load Balancer)',
'health': 'terminated',
'id': 'i-0f5678192487123ab',
'state': 'n/a'
},
{
'description': 'N/A (Not registered with Load Balancer)',
'health': 'terminated',
'id': 'i-0bfd123124124124d',
'state': 'n/a'
}
]
actual_instances = poller.get_health_information_of_instance_not_associated_with_elb(
ids_of_all_instances,
instances_registered_with_elb
)
for expected_instance in expected_instances:
self.assertTrue(expected_instance in actual_instances)
curtailed_describe_instance_response_mock.assert_has_calls(
[
mock.call('i-0f5678192487123ab'),
mock.call('i-0bfd123124124124d')
],
any_order=True
)
@mock.patch('ebcli.lib.elasticbeanstalk.get_environment')
@mock.patch('ebcli.display.traditional._datetime_utcnow_wrapper')
def test_env_data(self, datetime_utcnow_mock, get_environment_mock):
datetime_utcnow_mock.return_value = datetime.datetime(2018, 3, 14, 22, 0, 30, 195079)
get_environment_mock.return_value = environment.Environment(
name='fake env name',
status='Ready',
health='Green'
)
instance_states = [
{
'Description': '',
'InstanceId': 'i-0cad09d6183cb22fb',
'Reason': '',
'State': 'InService'
},
{
'Description': '',
'InstanceId': 'i-0f5678192487123ab',
'Reason': '',
'State': 'terminated'
},
{
'Description': '',
'InstanceId': 'i-0bfd123124124124d',
'Reason': '',
'State': 'terminated'
},
]
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
instance_ids = ['i-0cad09d6183cb22fb', 'i-0f5678192487123ab', 'i-0bfd123124124124d']
self.assertEqual(
{
'Color': 'Green',
'EnvironmentName': 'fake env name',
'InService': 1,
'Other': 2,
'RefreshedAt': datetime.datetime(2018, 3, 14, 22, 0, 30, 195079),
'Status': 'Ready',
'Total': 3
},
poller.assemble_environment_data(instance_ids, instance_states)
)
@mock.patch('ebcli.lib.elasticbeanstalk.get_environment_resources')
@mock.patch('ebcli.display.traditional._datetime_utcnow_wrapper')
def test_get_health_data(
self,
datetime_utcnow_mock,
curtailed_get_environment_resources_mock
):
self.maxDiff = None
datetime_utcnow_mock.return_value = datetime.datetime(2018, 3, 14, 22, 0, 30, 195079)
curtailed_get_environment_resources_mock.return_value = {
'EnvironmentResources': {
'EnvironmentName': 'fake env name',
'Instances': [
{
'Id': 'i-0aa042833bfdec77d'
},
{
'Id': 'i-0965954076351e6e0'
},
{
'Id': 'i-077ad825504695eb9'
}
],
'LoadBalancers': [
{
'Name': 'awseb-e-a-AWSEBLoa-1WOG31HKVP6LS'
}
],
}
}
poller = traditional.TraditionalHealthDataPoller('fake app name', 'fake env name')
poller.get_instance_states = mock.MagicMock(
return_value=[
{
'Description': '',
'InstanceId': 'i-0aa042833bfdec77d',
'Reason': '',
'State': 'healthy'
},
{
'Description': '',
'InstanceId': 'i-0965954076351e6e0',
'Reason': '',
'State': 'healthy'
},
]
)
poller.get_instance_health = mock.MagicMock(
return_value=[
{
'description': '',
'health': 'running',
'id': 'i-0aa042833bfdec77d',
'state': 'healthy'
},
{
'description': '',
'health': 'running',
'id': 'i-0965954076351e6e0',
'state': 'healthy'
},
]
)
poller.get_health_information_of_instance_not_associated_with_elb = mock.MagicMock(
return_value=[
{
'description': 'N/A (Not registered with Load Balancer)',
'health': 'terminated',
'id': 'i-077ad825504695eb9',
'state': 'n/a'
}
]
)
poller.assemble_environment_data = mock.MagicMock(
return_value={
'Color': 'Green',
'EnvironmentName': 'fake env name',
'InService': 1,
'Other': 2,
'RefreshedAt': datetime.datetime(2018, 3, 14, 22, 0, 30, 195079),
'Status': 'Ready',
'Total': 3
}
)
self.assertEqual(
{
'environment': {
'Color': 'Green',
'EnvironmentName': 'fake env name',
'InService': 1,
'Other': 2,
'RefreshedAt': datetime.datetime(2018, 3, 14, 22, 0, 30, 195079),
'Status': 'Ready',
'Total': 3
},
'instances': [
{
'description': '',
'health': 'running',
'id': 'i-0aa042833bfdec77d',
'state': 'healthy'
},
{
'description': '',
'health': 'running',
'id': 'i-0965954076351e6e0',
'state': 'healthy'
},
{
'description': 'N/A (Not registered with Load Balancer)',
'health': 'terminated',
'id': 'i-077ad825504695eb9',
'state': 'n/a'
}
]
},
poller._get_health_data()
)
|
photogrammetry_importer/file_handlers/meshroom_file_handler.py
|
clayne/Blender-Addon-Photogrammetry-Importer
| 452 |
140821
|
import json
import numpy as np
import os
from photogrammetry_importer.types.camera import Camera
from photogrammetry_importer.types.point import Point
from photogrammetry_importer.file_handlers.utility import (
check_radial_distortion,
)
from photogrammetry_importer.blender_utility.logging_utility import log_report
class MeshroomFileHandler:
"""Class to read and write :code:`Meshroom` files and workspaces."""
# Note: *.SfM files are actually just *.JSON files.
@staticmethod
def _get_element(data_list, id_string, query_id):
result = None
for ele in data_list:
if int(ele[id_string]) == query_id:
result = ele
break
assert result is not None
return result
@classmethod
def _parse_cameras_from_json_data(
cls,
json_data,
image_dp,
image_fp_type,
suppress_distortion_warnings,
op,
):
cams = []
image_index_to_camera_index = {}
is_valid_file = (
"views" in json_data
and "intrinsics" in json_data
and "poses" in json_data
)
if not is_valid_file:
log_report(
"ERROR",
"FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the"
+ " SfM reconstruction results: view, intrinsics and poses.",
op,
)
return cams, image_index_to_camera_index
views = json_data["views"] # is a list of dicts (view)
intrinsics = json_data["intrinsics"] # is a list of dicts (intrinsic)
extrinsics = json_data["poses"] # is a list of dicts (extrinsic)
# IMPORTANT:
# Views contain the number of input images
# Extrinsics may contain only a subset of views!
# (Not all views are necessarily contained in the reconstruction)
for rec_index, extrinsic in enumerate(extrinsics):
camera = Camera()
view_index = int(extrinsic["poseId"])
image_index_to_camera_index[view_index] = rec_index
corresponding_view = cls._get_element(views, "poseId", view_index)
camera.image_fp_type = image_fp_type
camera.image_dp = image_dp
camera._absolute_fp = str(corresponding_view["path"])
camera._relative_fp = os.path.basename(
str(corresponding_view["path"])
)
camera._undistorted_relative_fp = str(extrinsic["poseId"]) + ".exr"
if image_dp is None:
camera._undistorted_absolute_fp = None
else:
camera._undistorted_absolute_fp = os.path.join(
image_dp, camera._undistorted_relative_fp
)
camera.width = int(corresponding_view["width"])
camera.height = int(corresponding_view["height"])
id_intrinsic = int(corresponding_view["intrinsicId"])
intrinsic_params = cls._get_element(
intrinsics, "intrinsicId", id_intrinsic
)
focal_length = float(intrinsic_params["pxFocalLength"])
cx = float(intrinsic_params["principalPoint"][0])
cy = float(intrinsic_params["principalPoint"][1])
if (
"distortionParams" in intrinsic_params
and len(intrinsic_params["distortionParams"]) > 0
):
# TODO proper handling of distortion parameters
radial_distortion = float(
intrinsic_params["distortionParams"][0]
)
else:
radial_distortion = 0.0
if not suppress_distortion_warnings:
check_radial_distortion(
radial_distortion, camera._relative_fp, op
)
camera_calibration_matrix = np.array(
[[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]]
)
camera.set_calibration(
camera_calibration_matrix, radial_distortion
)
extrinsic_params = extrinsic["pose"]["transform"]
cam_rotation_list = extrinsic_params["rotation"]
camera.set_rotation_with_rotation_mat(
np.array(cam_rotation_list, dtype=float).reshape(3, 3).T
)
camera.set_camera_center_after_rotation(
np.array(extrinsic_params["center"], dtype=float)
)
camera.view_index = view_index
cams.append(camera)
return cams, image_index_to_camera_index
@staticmethod
def _parse_points_from_json_data(
json_data, image_index_to_camera_index, op
):
points = []
is_valid_file = "structure" in json_data
if not is_valid_file:
log_report(
"ERROR",
"FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain "
+ " the SfM reconstruction results: structure.",
op,
)
return points
structure = json_data["structure"]
for json_point in structure:
custom_point = Point(
coord=np.array(json_point["X"], dtype=float),
color=np.array(json_point["color"], dtype=int),
id=int(json_point["landmarkId"]),
scalars=[],
)
points.append(custom_point)
return points
@classmethod
def parse_meshroom_sfm_file(
cls,
sfm_ifp,
image_idp,
image_fp_type,
suppress_distortion_warnings,
op=None,
):
"""Parse a :code:`Meshroom` (:code:`.sfm` or :code:`.json`) file.
        Parse the different file formats created with the
        :code:`StructureFromMotion` / :code:`ConvertSfMFormat` nodes in
        :code:`Meshroom`.
"""
log_report("INFO", "parse_meshroom_sfm_file: ...", op)
log_report("INFO", "sfm_ifp: " + sfm_ifp, op)
        with open(sfm_ifp, "r") as input_file:
            json_data = json.load(input_file)
(
cams,
image_index_to_camera_index,
) = cls._parse_cameras_from_json_data(
json_data,
image_idp,
image_fp_type,
suppress_distortion_warnings,
op,
)
if "structure" in json_data:
points = cls._parse_points_from_json_data(
json_data, image_index_to_camera_index, op
)
else:
points = []
log_report("INFO", "parse_meshroom_sfm_file: Done", op)
return cams, points
@staticmethod
def _get_latest_node(json_graph, node_type):
i = 0
while node_type + "_" + str(i + 1) in json_graph:
i = i + 1
if i == 0:
return None
else:
return json_graph[node_type + "_" + str(i)]
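    # Example (editor's sketch): for a json_graph containing the keys "Texturing_1"
    # and "Texturing_2", _get_latest_node(json_graph, "Texturing") returns
    # json_graph["Texturing_2"]; if no "Texturing_*" key exists, None is returned.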
@classmethod
def _get_node(cls, json_graph, node_type, node_number, op):
if node_number == -1:
return cls._get_latest_node(json_graph, node_type)
else:
node_key = node_type + "_" + str(node_number)
if node_key in json_graph:
return json_graph[node_key]
else:
log_report(
"ERROR",
"Invalid combination of node type (i.e. "
+ node_type
+ ") "
+ "and node number (i.e. "
+ str(node_number)
+ ") provided",
op,
)
assert False
@staticmethod
def _get_data_fp_of_node(cache_dp, data_node, fn_or_fn_list):
if isinstance(fn_or_fn_list, str):
fn_list = [fn_or_fn_list]
else:
fn_list = fn_or_fn_list
if data_node is None:
return None
node_type = data_node["nodeType"]
uid_0 = data_node["uids"]["0"]
data_fp = None
for fn in fn_list:
possible_data_fp = os.path.join(cache_dp, node_type, uid_0, fn)
if os.path.isfile(possible_data_fp):
data_fp = possible_data_fp
break
return data_fp
@classmethod
def _get_node_data_fp(
cls, cache_dp, json_graph, node_type, node_number, fn_or_fn_list, op
):
data_node = cls._get_node(json_graph, node_type, node_number, op)
data_fp = cls._get_data_fp_of_node(cache_dp, data_node, fn_or_fn_list)
return data_fp
@staticmethod
def _get_data_dp_of_node(cache_dp, data_node):
if data_node is None:
return None
node_type = data_node["nodeType"]
uid_0 = data_node["uids"]["0"]
return os.path.join(cache_dp, node_type, uid_0)
@classmethod
def _get_node_data_dp(
cls, cache_dp, json_graph, node_type, node_number, op
):
data_node = cls._get_node(json_graph, node_type, node_number, op)
data_dp = cls._get_data_dp_of_node(cache_dp, data_node)
return data_dp
@classmethod
def _get_sfm_fp(
cls, sfm_node_type, cache_dp, json_graph, sfm_node_number, op
):
if sfm_node_type == "ConvertSfMFormatNode":
sfm_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"ConvertSfMFormat",
sfm_node_number,
["sfm.sfm", "sfm.json"],
op,
)
elif sfm_node_type == "StructureFromMotionNode":
sfm_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"StructureFromMotion",
sfm_node_number,
"cameras.sfm",
op,
)
elif sfm_node_type == "AUTOMATIC":
sfm_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"ConvertSfMFormat",
sfm_node_number,
["sfm.sfm", "sfm.json"],
op,
)
if sfm_fp is None:
sfm_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"StructureFromMotion",
sfm_node_number,
"cameras.sfm",
op,
)
else:
log_report("ERROR", "Selected SfM node is not supported", op)
assert False
return sfm_fp
@classmethod
def _get_mesh_fp(
cls, mesh_node_type, cache_dp, json_graph, mesh_node_number, op
):
if mesh_node_type == "Texturing":
mesh_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"Texturing",
mesh_node_number,
"texturedMesh.obj",
op,
)
elif mesh_node_type == "MeshFiltering":
mesh_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"MeshFiltering",
mesh_node_number,
"mesh.obj",
op,
)
elif mesh_node_type == "Meshing":
mesh_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"Meshing",
mesh_node_number,
"mesh.obj",
op,
)
elif mesh_node_type == "AUTOMATIC":
mesh_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"Texturing",
mesh_node_number,
"texturedMesh.obj",
op,
)
if mesh_fp is None:
mesh_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"MeshFiltering",
mesh_node_number,
"mesh.obj",
op,
)
if mesh_fp is None:
mesh_fp = cls._get_node_data_fp(
cache_dp,
json_graph,
"Meshing",
mesh_node_number,
"mesh.obj",
op,
)
else:
log_report("ERROR", "Select Mesh node is not supported!", op)
assert False
return mesh_fp
@classmethod
def _get_image_dp(cls, cache_dp, json_graph, prepare_node_number, op):
prepare_dp = cls._get_node_data_dp(
cache_dp,
json_graph,
"PrepareDenseScene",
prepare_node_number,
op,
)
return prepare_dp
@classmethod
    def parse_meshroom_mg_file(
cls,
mg_fp,
sfm_node_type,
sfm_node_number,
mesh_node_type,
mesh_node_number,
prepare_node_number,
op=None,
):
"""Parse a :code:`Meshroom` project file (:code:`.mg`)."""
cache_dp = os.path.join(os.path.dirname(mg_fp), "MeshroomCache")
        with open(mg_fp, "r") as mg_file:
            json_data = json.load(mg_file)
json_graph = json_data["graph"]
sfm_fp = cls._get_sfm_fp(
sfm_node_type, cache_dp, json_graph, sfm_node_number, op
)
mesh_fp = cls._get_mesh_fp(
mesh_node_type, cache_dp, json_graph, mesh_node_number, op
)
image_dp = cls._get_image_dp(
cache_dp, json_graph, prepare_node_number, op
)
if sfm_fp is not None:
log_report("INFO", "Found the following sfm file: " + sfm_fp, op)
else:
log_report(
"INFO",
"Request target SfM result does not exist in this meshroom"
" project.",
op,
)
if mesh_fp is not None:
log_report("INFO", "Found the following mesh file: " + mesh_fp, op)
else:
log_report(
"INFO",
"Request target mesh does not exist in this meshroom project.",
op,
)
return sfm_fp, mesh_fp, image_dp
@classmethod
def parse_meshroom_file(
cls,
meshroom_ifp,
use_workspace_images,
image_dp,
image_fp_type,
suppress_distortion_warnings,
sfm_node_type,
sfm_node_number,
mesh_node_type,
mesh_node_number,
prepare_node_number,
op=None,
):
"""Parse a :code:`Meshroom` file.
Supported file formats are :code:`.mg`, :code:`.sfm` or :code:`.json`.
"""
log_report("INFO", "parse_meshroom_file: ...", op)
log_report("INFO", "meshroom_ifp: " + meshroom_ifp, op)
ext = os.path.splitext(meshroom_ifp)[1].lower()
if ext == ".mg":
(
meshroom_ifp,
mesh_fp,
image_idp_workspace,
            ) = cls.parse_meshroom_mg_file(
meshroom_ifp,
sfm_node_type,
sfm_node_number,
mesh_node_type,
mesh_node_number,
prepare_node_number,
op,
)
if (
use_workspace_images
and image_idp_workspace is not None
and os.path.isdir(image_idp_workspace)
):
image_dp = image_idp_workspace
log_report("INFO", "Using image directory in workspace.", op)
else:
assert ext == ".json" or ext == ".sfm"
mesh_fp = None
if meshroom_ifp is not None:
cams, points = cls.parse_meshroom_sfm_file(
meshroom_ifp,
image_dp,
image_fp_type,
suppress_distortion_warnings,
op,
)
else:
log_report(
"WARNING",
"Meshroom project does not contain cameras or points. Have"
" you saved the project (i.e. the *.mg file)?",
op,
)
cams = []
points = []
log_report("INFO", "parse_meshroom_file: Done", op)
return cams, points, mesh_fp, image_dp
|
pymtl3/stdlib/basic_rtl/register_files.py
|
pymtl/pymtl3
| 152 |
140826
|
from pymtl3 import *
class RegisterFile( Component ):
def construct( s, Type, nregs=32, rd_ports=1, wr_ports=1,
const_zero=False ):
addr_type = mk_bits( max( 1, clog2( nregs ) ) )
s.raddr = [ InPort( addr_type ) for i in range( rd_ports ) ]
s.rdata = [ OutPort( Type ) for i in range( rd_ports ) ]
s.waddr = [ InPort( addr_type ) for i in range( wr_ports ) ]
s.wdata = [ InPort( Type ) for i in range( wr_ports ) ]
s.wen = [ InPort( Bits1 ) for i in range( wr_ports ) ]
s.regs = [ Wire( Type ) for i in range(nregs) ]
@update
def up_rf_read():
for i in range( rd_ports ):
s.rdata[i] @= s.regs[ s.raddr[i] ]
if const_zero:
@update_ff
def up_rf_write_constzero():
for i in range( wr_ports ):
if s.wen[i] & (s.waddr[i] != 0):
s.regs[ s.waddr[i] ] <<= s.wdata[i]
else:
@update_ff
def up_rf_write():
for i in range( wr_ports ):
if s.wen[i]:
s.regs[ s.waddr[i] ] <<= s.wdata[i]
|
inversion/video/post_processing.py
|
cedro3/stylegan3-editing
| 347 |
140832
|
from typing import List, Dict
import numpy as np
import torch
from tqdm import tqdm
from utils.fov_expansion import Expander
from inversion.video.video_config import VideoConfig
from utils.common import tensor2im, get_identity_transform
def postprocess_and_smooth_inversions(results: Dict, net, opts: VideoConfig):
result_latents = np.array(list(results["result_latents"].values()))
# average fine layers
result_latents[:, 9:, :] = result_latents[:, 9:, :].mean(axis=0)
# smooth latents and landmarks transforms
smoothed_latents, smoothed_transforms = smooth_latents_and_transforms(result_latents,
results["landmarks_transforms"],
opts=opts)
# generate the smoothed video frames
result_images_smoothed = []
expander = Expander(G=net.decoder)
print("Generating smoothed frames...")
for latent, trans in tqdm(zip(smoothed_latents, smoothed_transforms)):
with torch.no_grad():
if trans is None:
trans = get_identity_transform()
im = expander.generate_expanded_image(ws=latent.unsqueeze(0),
landmark_t=trans.cpu().numpy(),
pixels_left=opts.expansion_amounts[0],
pixels_right=opts.expansion_amounts[1],
pixels_top=opts.expansion_amounts[2],
pixels_bottom=opts.expansion_amounts[3])
result_images_smoothed.append(np.array(tensor2im(im[0])))
return result_images_smoothed
def smooth_latents_and_transforms(result_latents: np.ndarray, result_landmarks_transforms: List[torch.tensor],
opts: VideoConfig):
smoothed_latents = smooth_ws(result_latents)
smoothed_latents = torch.from_numpy(smoothed_latents).float().cuda()
if opts.landmarks_transforms_path is not None:
smoothed_transforms = smooth_ws(torch.cat([t.unsqueeze(0) for t in result_landmarks_transforms]))
else:
smoothed_transforms = [None] * len(smoothed_latents)
return smoothed_latents, smoothed_transforms
def smooth_ws(ws: np.ndarray):
    # Weighted 5-tap temporal average; the weights (0.25, 0.75, 1, 0.75, 0.25) sum
    # to 3, hence the division. The result is 4 frames shorter than the input.
    ws_p = ws[2:-2] + 0.75 * ws[3:-1] + 0.75 * ws[1:-3] + 0.25 * ws[:-4] + 0.25 * ws[4:]
    ws_p = ws_p / 3
    return ws_p
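# Editor's note (sketch, not part of the original module): for a linear ramp the
# symmetric weights leave values unchanged, e.g.
#   smooth_ws(np.arange(10.0))[0] == 2.0   # (2 + 0.75*3 + 0.75*1 + 0.25*0 + 0.25*4) / 3
# and the output is always 4 frames shorter than the input.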
def smooth_s(s):
batched_s = {}
for c in s[0]:
        batched_c = torch.cat([s[i][c] for i in range(len(s))])
        batched_s[c] = batched_c
new_s = {}
for c in batched_s:
new_s[c] = smooth_ws(batched_s[c])
new_smooth_s = []
for i in range(new_s['input'].shape[0]):
curr_s = {c: new_s[c][i].unsqueeze(0) for c in new_s}
new_smooth_s.append(curr_s)
return new_smooth_s
|
RecoTracker/Configuration/python/RecoTrackerP5_EventContent_cff.py
|
ckamtsikis/cmssw
| 852 |
140863
|
import FWCore.ParameterSet.Config as cms
#Tracks without extras and hits
#AOD content
RecoTrackerAOD = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep recoTracks_ctfWithMaterialTracksP5_*_*',
'keep recoTracks_ctfWithMaterialTracksP5LHCNavigation_*_*',
'keep recoTracks_rsWithMaterialTracksP5_*_*',
'keep recoTracks_cosmictrackfinderP5_*_*',
'keep recoTracks_beamhaloTracks_*_*',
'keep recoTracks_splittedTracksP5_*_*',
'keep recoTracks_ctfWithMaterialTracksP5Top_*_*',
'keep recoTracks_rsWithMaterialTracksP5Top_*_*',
'keep recoTracks_cosmictrackfinderP5Top_*_*',
'keep recoTracks_ctfWithMaterialTracksP5Bottom_*_*',
'keep recoTracks_rsWithMaterialTracksP5Bottom_*_*',
'keep recoTracks_cosmictrackfinderP5Bottom_*_*',
'keep recoTracks_regionalCosmicTracks_*_*',
'keep *_dedxHitInfo_*_*',
'keep *_dedxHarmonic2_*_*',
'keep *_dedxHitInfoCTF_*_*',
'keep *_dedxHarmonic2CTF_*_*',
'keep *_dedxHitInfoCosmicTF_*_*',
'keep *_dedxHarmonic2CosmicTF_*_*')
)
#RECO content
RecoTrackerRECO = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep recoTrackExtras_ctfWithMaterialTracksP5_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5_*_*',
'keep recoTrackExtras_ctfWithMaterialTracksP5LHCNavigation_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5LHCNavigation_*_*',
'keep recoTrackExtras_rsWithMaterialTracksP5_*_*',
'keep TrackingRecHitsOwned_rsWithMaterialTracksP5_*_*',
'keep recoTrackExtras_cosmictrackfinderP5_*_*',
'keep TrackingRecHitsOwned_cosmictrackfinderP5_*_*',
'keep recoTrackExtras_beamhaloTracks_*_*',
'keep TrackingRecHitsOwned_beamhaloTracks_*_*',
'keep recoTrackExtras_splittedTracksP5_*_*',
'keep TrackingRecHitsOwned_splittedTracksP5_*_*',
'keep recoTrackExtras_ctfWithMaterialTracksP5Top_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5Top_*_*',
'keep recoTrackExtras_rsWithMaterialTracksP5Top_*_*',
'keep TrackingRecHitsOwned_rsWithMaterialTracksP5Top_*_*',
'keep recoTrackExtras_cosmictrackfinderP5Top_*_*',
'keep TrackingRecHitsOwned_cosmictrackfinderP5Top_*_*',
'keep recoTrackExtras_ctfWithMaterialTracksP5Bottom_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5Bottom_*_*',
'keep recoTrackExtras_rsWithMaterialTracksP5Bottom_*_*',
'keep TrackingRecHitsOwned_rsWithMaterialTracksP5Bottom_*_*',
'keep recoTrackExtras_cosmictrackfinderP5Bottom_*_*',
'keep TrackingRecHitsOwned_cosmictrackfinderP5Bottom_*_*',
'keep recoTrackExtras_regionalCosmicTracks_*_*',
'keep TrackingRecHitsOwned_regionalCosmicTracks_*_*',
'keep *_dedxTruncated40_*_*',
'keep *_dedxTruncated40CTF_*_*',
'keep *_dedxTruncated40CosmicTF_*_*',
'keep recoTracks_cosmicDCTracks_*_*',
'keep recoTrackExtras_cosmicDCTracks_*_*',
'keep TrackingRecHitsOwned_cosmicDCTracks_*_*')
)
RecoTrackerRECO.outputCommands.extend(RecoTrackerAOD.outputCommands)
#Full Event content
RecoTrackerFEVT = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoTrackerFEVT.outputCommands.extend(RecoTrackerRECO.outputCommands)
|
apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/zope/interface/tests/test_sorting.py
|
tharindu1st/apim-migration-resources
| 9,953 |
140866
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test interface sorting
"""
import unittest
from zope.interface import Interface
class I1(Interface): pass
class I2(I1): pass
class I3(I1): pass
class I4(Interface): pass
class I5(I4): pass
class I6(I2): pass
class Test(unittest.TestCase):
def test(self):
l = [I1, I3, I5, I6, I4, I2]
l.sort()
self.assertEqual(l, [I1, I2, I3, I4, I5, I6])
def test_w_None(self):
l = [I1, None, I3, I5, I6, I4, I2]
l.sort()
self.assertEqual(l, [I1, I2, I3, I4, I5, I6, None])
def test_w_equal_names(self):
# interfaces with equal names but different modules should sort by
# module name
from zope.interface.tests.m1 import I1 as m1_I1
l = [I1, m1_I1]
l.sort()
self.assertEqual(l, [m1_I1, I1])
|
desktop_local_tests/test_packet_capture_disrupt_vpn_connection.py
|
UAEKondaya1/expressvpn_leak_testing
| 219 |
140876
|
<filename>desktop_local_tests/test_packet_capture_disrupt_vpn_connection.py
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.vpn_connection_disrupter import VPNConnectionDisrupter
class TestPacketCaptureDisruptVPNConnection(LocalPacketCaptureTestCaseWithDisrupter):
'''Summary:
Tests whether traffic leaving the user's device leaks outside of the VPN tunnel when the VPN
server becomes unreachable.
Details:
    This test will connect to the VPN and then put up firewall rules which block connectivity to the
    VPN server. The test looks for leaking traffic once connectivity to the server has been blocked.
Discussion:
Connectivity drops to the VPN server are very real world threats. This could happen for a
variety of reasons:
* Server goes down
    * Server is deliberately taken out of rotation for maintenance, etc.
* Blocking
* Bad routes
In all cases a firewall adequately represents these connectivity drops.
Weaknesses:
Packet capture tests can be noisy. Traffic can be detected as a leak but in actual fact may not
be. For example, traffic might go to a server owned by the VPN provider to re-establish
    connections. In general this test is best used for manually exploring leaks rather than for
automation.
With some systems/VPN applications, a firewall on the test device might not adequately block the
    VPN server. For such setups, a secondary device is needed, e.g.
* Firewall on a router
* Firewall on host if the test device is a VM.
Scenarios:
No restrictions.
TODO:
Implement multi-device test with firewall off device
'''
def __init__(self, devices, parameters):
super().__init__(VPNConnectionDisrupter, devices, parameters)
|
lib/pymedphys/trf.py
|
ethanio12345/pymedphys
| 207 |
140909
|
<reponame>ethanio12345/pymedphys<filename>lib/pymedphys/trf.py<gh_stars>100-1000
# pylint: disable = unused-import, missing-docstring
from pymedphys._trf.decode.trf2pandas import trf2pandas as read
from pymedphys._trf.manage.identify import identify_logfile as identify
|
openfold/utils/lr_schedulers.py
|
ville761/openfold
| 789 |
140911
|
import torch
class AlphaFoldLRScheduler(torch.optim.lr_scheduler._LRScheduler):
""" Implements the learning rate schedule defined in the AlphaFold 2
supplement. A linear warmup is followed by a plateau at the maximum
learning rate and then exponential decay.
Note that the initial learning rate of the optimizer in question is
ignored; use this class' base_lr parameter to specify the starting
point of the warmup.
"""
def __init__(self,
optimizer,
last_epoch: int = -1,
verbose: bool = False,
base_lr: float = 0.,
max_lr: float = 0.001,
warmup_no_steps: int = 1000,
start_decay_after_n_steps: int = 50000,
decay_every_n_steps: int = 50000,
decay_factor: float = 0.95,
):
step_counts = {
"warmup_no_steps": warmup_no_steps,
"start_decay_after_n_steps": start_decay_after_n_steps,
}
for k,v in step_counts.items():
if(v < 0):
raise ValueError(f"{k} must be nonnegative")
if(warmup_no_steps > start_decay_after_n_steps):
raise ValueError(
"warmup_no_steps must not exceed start_decay_after_n_steps"
)
self.optimizer = optimizer
self.last_epoch = last_epoch
self.verbose = verbose
self.base_lr = base_lr
self.max_lr = max_lr
self.warmup_no_steps = warmup_no_steps
self.start_decay_after_n_steps = start_decay_after_n_steps
self.decay_every_n_steps = decay_every_n_steps
self.decay_factor = decay_factor
super(AlphaFoldLRScheduler, self).__init__(
optimizer,
last_epoch=last_epoch,
verbose=verbose,
)
def state_dict(self):
state_dict = {
k:v for k,v in self.__dict__.items() if k not in ["optimizer"]
}
return state_dict
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def get_lr(self):
if(not self._get_lr_called_within_step):
raise RuntimeError(
"To get the last learning rate computed by the scheduler, use "
"get_last_lr()"
)
step_no = self.last_epoch
if(step_no <= self.warmup_no_steps):
lr = self.base_lr + (step_no / self.warmup_no_steps) * self.max_lr
elif(step_no > self.start_decay_after_n_steps):
steps_since_decay = step_no - self.start_decay_after_n_steps
exp = (steps_since_decay // self.decay_every_n_steps) + 1
lr = self.max_lr * (self.decay_factor ** exp)
else: # plateau
lr = self.max_lr
return [lr for group in self.optimizer.param_groups]
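# Minimal usage sketch (editor's addition, not part of the original module); the
# dummy parameter and Adam optimizer below are stand-ins for a real training setup.
if __name__ == "__main__":
    dummy_param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.Adam([dummy_param], lr=0.)  # initial lr is ignored
    scheduler = AlphaFoldLRScheduler(
        optimizer,
        warmup_no_steps=10,
        start_decay_after_n_steps=100,
        decay_every_n_steps=50,
    )
    for _ in range(120):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())  # learning rate after 120 steps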
|
regipy/__init__.py
|
pombredanne/regipy
| 190 |
140945
|
<reponame>pombredanne/regipy
from .registry import *
__title__ = 'regipy'
__version__ = '2.2.0'
|
public-engines/kaggle-titanic-engine/tests/prediction/test_predictor.py
|
tallandroid/incubator-marvin
| 101 |
140951
|
#!/usr/bin/env python
# coding=utf-8
try:
import mock
except ImportError:
import unittest.mock as mock
from marvin_titanic_engine.prediction import Predictor
class TestPredictor:
def test_execute(self, mocked_params):
model_mocked = {
"rf": mock.MagicMock(),
"svm": mock.MagicMock()
}
ac = Predictor(model=model_mocked)
ac.execute(input_message="fake message", params=mocked_params)
model_mocked["rf"].predict.assert_called_once()
model_mocked["svm"].predict.assert_called_once()
|
problems/tests/test_sock_merchant.py
|
vinta/fuck-coding-interviews
| 590 |
140964
|
<gh_stars>100-1000
# coding: utf-8
import unittest
from problems.sock_merchant import sockMerchant
class TestCase(unittest.TestCase):
def test(self):
test_data = [
{'n': 9, 'ar': [10, 20, 20, 10, 10, 30, 50, 10, 20], 'expected': 3},
]
for data in test_data:
n = data['n']
ar = data['ar']
expected = data['expected']
with self.subTest(n=n, ar=ar):
self.assertEqual(sockMerchant(n, ar), expected)
if __name__ == '__main__':
unittest.main()
|
tools/osx/crosstool/osx_archs.bzl
|
obruns/bazel
| 16,989 |
140985
|
"""Information regarding crosstool-supported architectures."""
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List of architectures supported by osx crosstool.
OSX_TOOLS_NON_DEVICE_ARCHS = [
"darwin_x86_64",
"darwin_arm64",
"darwin_arm64e",
"ios_i386",
"ios_x86_64",
"watchos_i386",
"watchos_x86_64",
"tvos_x86_64",
]
OSX_TOOLS_ARCHS = [
"ios_armv7",
"ios_arm64",
"ios_arm64e",
"watchos_armv7k",
"watchos_arm64_32",
"tvos_arm64",
] + OSX_TOOLS_NON_DEVICE_ARCHS
|
tests/unit/operations/test_solution_stack_ops.py
|
sdolenc/aws-elastic-beanstalk-cli
| 110 |
140995
|
<filename>tests/unit/operations/test_solution_stack_ops.py
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import yaml
import mock
import unittest
from ebcli.objects.solutionstack import SolutionStack
from ebcli.operations import solution_stack_ops
from ebcli.operations.platformops import PlatformVersion
class TestSolutionstackOps(unittest.TestCase):
@mock.patch('ebcli.lib.elasticbeanstalk.get_available_solution_stacks')
@mock.patch('ebcli.operations.platform_version_ops.list_custom_platform_versions')
def test_find_solution_stack_from_string(
self,
custom_platforms_lister_mock,
solution_stack_lister_mock
):
solution_strings = [
'docker-1.11.2',
'docker-1.12.6',
'docker-1.6.2',
'docker-1.7.1',
'docker-1.9.1',
'docker-17.03.1-ce',
'glassfish-4.0-java-7-(preconfigured-docker)',
'glassfish-4.1-java-8-(preconfigured-docker)',
'go-1.3-(preconfigured-docker)',
'go-1.4',
'go-1.4-(preconfigured-docker)',
'go-1.5',
'go-1.6',
'go-1.8',
'iis-10.0',
'iis-7.5',
'iis-8',
'iis-8.5',
'java-7',
'java-8',
'multi-container-docker-1.11.2-(generic)',
'multi-container-docker-1.6.2-(generic)',
'multi-container-docker-1.9.1-(generic)',
'multi-container-docker-17.03.1-ce-(generic)',
'node.js',
'packer-1.0.0',
'packer-1.0.3',
'php-5.3',
'php-5.4',
'php-5.5',
'php-5.6',
'php-7.0',
'python',
'python-2.7',
'python-3.4',
'python-3.4-(preconfigured-docker)',
'ruby-1.9.3',
'ruby-2.0-(passenger-standalone)',
'ruby-2.0-(puma)',
'ruby-2.1-(passenger-standalone)',
'ruby-2.1-(puma)',
'ruby-2.2-(passenger-standalone)',
'ruby-2.2-(puma)',
'ruby-2.3-(passenger-standalone)',
'ruby-2.3-(puma)',
'tomcat-6',
'tomcat-7',
'tomcat-7-java-6',
'tomcat-7-java-7',
'tomcat-8-java-8',
'Node.js',
'PHP',
'Python',
'Ruby',
'Tomcat',
'IIS',
'Docker',
'Multi-container Docker',
'Glassfish',
'Go',
'Java',
'Packer',
'64bit Windows Server Core 2016 v1.2.0 running IIS 10.0',
'64bit Windows Server 2016 v1.2.0 running IIS 10.0',
'64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5',
'64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5',
'64bit Windows Server 2012 v1.2.0 running IIS 8',
'64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5',
'64bit Amazon Linux 2017.03 v2.5.3 running Java 8',
'64bit Amazon Linux 2017.03 v2.5.3 running Java 7',
'64bit Amazon Linux 2017.03 v4.2.1 running Node.js',
'64bit Amazon Linux 2017.03 v4.2.0 running Node.js',
'64bit Amazon Linux 2017.03 v4.1.1 running Node.js',
'64bit Amazon Linux 2015.09 v2.0.8 running Node.js',
'64bit Amazon Linux 2015.03 v1.4.6 running Node.js',
'64bit Amazon Linux 2014.03 v1.1.0 running Node.js',
'32bit Amazon Linux 2014.03 v1.1.0 running Node.js',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.4',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.5',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.6',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0',
'64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4',
'64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6',
'64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0',
'64bit Amazon Linux 2017.03 v2.4.1 running PHP 5.6',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 5.4',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 5.5',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 5.6',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 7.0',
'64bit Amazon Linux 2015.03 v1.4.6 running PHP 5.6',
'64bit Amazon Linux 2015.03 v1.4.6 running PHP 5.5',
'64bit Amazon Linux 2015.03 v1.4.6 running PHP 5.4',
'64bit Amazon Linux 2014.03 v1.1.0 running PHP 5.5',
'64bit Amazon Linux 2014.03 v1.1.0 running PHP 5.4',
'32bit Amazon Linux 2014.03 v1.1.0 running PHP 5.5',
'32bit Amazon Linux 2014.03 v1.1.0 running PHP 5.4',
'64bit Amazon Linux running PHP 5.3',
'32bit Amazon Linux running PHP 5.3',
'64bit Amazon Linux 2017.03 v2.5.0 running Python 3.4',
'64bit Amazon Linux 2017.03 v2.5.0 running Python',
'64bit Amazon Linux 2017.03 v2.5.0 running Python 2.7',
'64bit Amazon Linux 2015.03 v1.4.6 running Python 3.4',
'64bit Amazon Linux 2015.03 v1.4.6 running Python 2.7',
'64bit Amazon Linux 2015.03 v1.4.6 running Python',
'64bit Amazon Linux 2014.03 v1.1.0 running Python 2.7',
'64bit Amazon Linux 2014.03 v1.1.0 running Python',
'32bit Amazon Linux 2014.03 v1.1.0 running Python 2.7',
'32bit Amazon Linux 2014.03 v1.1.0 running Python',
'64bit Amazon Linux running Python',
'32bit Amazon Linux running Python',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.3 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.2 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.1 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.0 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.3 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.2 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.1 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.0 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 1.9.3',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.2 (Puma)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.2 (Passenger Standalone)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.1 (Puma)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.1 (Passenger Standalone)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.0 (Puma)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.0 (Passenger Standalone)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 1.9.3',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.1 (Puma)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.1 (Passenger Standalone)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.0 (Puma)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.0 (Passenger Standalone)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 1.9.3',
'32bit Amazon Linux 2014.03 v1.1.0 running Ruby 1.9.3',
'64bit Amazon Linux 2017.03 v2.6.3 running Tomcat 8 Java 8',
'64bit Amazon Linux 2017.03 v2.6.3 running Tomcat 7 Java 7',
'64bit Amazon Linux 2017.03 v2.6.3 running Tomcat 7 Java 6',
'64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8',
'64bit Amazon Linux 2015.03 v1.4.5 running Tomcat 8 Java 8',
'64bit Amazon Linux 2015.03 v1.4.5 running Tomcat 7 Java 7',
'64bit Amazon Linux 2015.03 v1.4.5 running Tomcat 7 Java 6',
'64bit Amazon Linux 2015.03 v1.4.4 running Tomcat 7 Java 7',
'64bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 7',
'64bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 6',
'32bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 7',
'32bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 6',
'64bit Amazon Linux running Tomcat 7',
'64bit Amazon Linux running Tomcat 6',
'32bit Amazon Linux running Tomcat 7',
'32bit Amazon Linux running Tomcat 6',
'64bit Windows Server Core 2012 R2 running IIS 8.5',
'64bit Windows Server 2012 R2 running IIS 8.5',
'64bit Windows Server 2012 running IIS 8',
'64bit Windows Server 2008 R2 running IIS 7.5',
'64bit Amazon Linux 2017.03 v2.7.2 running Docker 17.03.1-ce',
'64bit Amazon Linux 2017.03 v2.7.1 running Docker 17.03.1-ce',
'64bit Amazon Linux 2017.03 v2.6.0 running Docker 1.12.6',
'64bit Amazon Linux 2016.09 v2.3.0 running Docker 1.11.2',
'64bit Amazon Linux 2016.03 v2.1.6 running Docker 1.11.2',
'64bit Amazon Linux 2016.03 v2.1.0 running Docker 1.9.1',
'64bit Amazon Linux 2015.09 v2.0.6 running Docker 1.7.1',
'64bit Amazon Linux 2015.03 v1.4.6 running Docker 1.6.2',
'64bit Amazon Linux 2017.03 v2.7.3 running Multi-container Docker 17.03.1-ce (Generic)',
'64bit Amazon Linux 2016.03 v2.1.6 running Multi-container Docker 1.11.2 (Generic)',
'64bit Amazon Linux 2016.03 v2.1.0 running Multi-container Docker 1.9.1 (Generic)',
'64bit Amazon Linux 2015.03 v1.4.6 running Multi-container Docker 1.6.2 (Generic)',
'64bit Debian jessie v2.7.2 running GlassFish 4.1 Java 8 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running GlassFish 4.0 Java 7 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running GlassFish 4.1 Java 8 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running GlassFish 4.0 Java 7 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running Go 1.4 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running Go 1.3 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running Go 1.4 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running Go 1.3 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running Python 3.4 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running Python 3.4 (Preconfigured - Docker)',
'64bit Amazon Linux 2017.03 v2.5.1 running Go 1.8',
'64bit Amazon Linux 2016.09 v2.3.3 running Go 1.6',
'64bit Amazon Linux 2016.09 v2.3.0 running Go 1.5',
'64bit Amazon Linux 2016.03 v2.1.0 running Go 1.4',
'64bit Amazon Linux 2017.03 v2.3.1 running Packer 1.0.3',
'64bit Amazon Linux 2017.03 v2.2.2 running Packer 1.0.0',
'Node.js',
'PHP 5.6',
'PHP 5.3',
'Python 3.4',
'Python',
'Ruby 2.3 (Puma)',
'Ruby 2.3 (Passenger Standalone)',
'Tomcat 8 Java 8',
'Tomcat 7',
'IIS 8.5',
'IIS 8.5',
'IIS 8',
'Docker 1.12.6',
'Multi-container Docker 17.03.1-ce (Generic)',
'Multi-container Docker 1.11.2 (Generic)',
'GlassFish 4.1 Java 8 (Preconfigured - Docker)',
'Go 1.4 (Preconfigured - Docker)',
'Python 3.4 (Preconfigured - Docker)',
'Java 8',
'Java 7',
'Go 1.8',
'Go 1.6',
'Go 1.5',
'Go 1.4',
'Packer 1.0.0',
]
solution_stacks = [
'64bit Windows Server Core 2016 v1.2.0 running IIS 10.0',
'64bit Windows Server 2016 v1.2.0 running IIS 10.0',
'64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5',
'64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5',
'64bit Windows Server 2012 v1.2.0 running IIS 8',
'64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5',
'64bit Amazon Linux 2017.03 v2.5.3 running Java 8',
'64bit Amazon Linux 2017.03 v2.5.3 running Java 7',
'64bit Amazon Linux 2017.03 v4.2.1 running Node.js',
'64bit Amazon Linux 2017.03 v4.2.0 running Node.js',
'64bit Amazon Linux 2017.03 v4.1.1 running Node.js',
'64bit Amazon Linux 2015.09 v2.0.8 running Node.js',
'64bit Amazon Linux 2015.03 v1.4.6 running Node.js',
'64bit Amazon Linux 2014.03 v1.1.0 running Node.js',
'32bit Amazon Linux 2014.03 v1.1.0 running Node.js',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.4',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.5',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.6',
'64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0',
'64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4',
'64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6',
'64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0',
'64bit Amazon Linux 2017.03 v2.4.1 running PHP 5.6',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 5.4',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 5.5',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 5.6',
'64bit Amazon Linux 2016.03 v2.1.6 running PHP 7.0',
'64bit Amazon Linux 2015.03 v1.4.6 running PHP 5.6',
'64bit Amazon Linux 2015.03 v1.4.6 running PHP 5.5',
'64bit Amazon Linux 2015.03 v1.4.6 running PHP 5.4',
'64bit Amazon Linux 2014.03 v1.1.0 running PHP 5.5',
'64bit Amazon Linux 2014.03 v1.1.0 running PHP 5.4',
'32bit Amazon Linux 2014.03 v1.1.0 running PHP 5.5',
'32bit Amazon Linux 2014.03 v1.1.0 running PHP 5.4',
'64bit Amazon Linux running PHP 5.3',
'32bit Amazon Linux running PHP 5.3',
'64bit Amazon Linux 2017.03 v2.5.0 running Python 3.4',
'64bit Amazon Linux 2017.03 v2.5.0 running Python',
'64bit Amazon Linux 2017.03 v2.5.0 running Python 2.7',
'64bit Amazon Linux 2015.03 v1.4.6 running Python 3.4',
'64bit Amazon Linux 2015.03 v1.4.6 running Python 2.7',
'64bit Amazon Linux 2015.03 v1.4.6 running Python',
'64bit Amazon Linux 2014.03 v1.1.0 running Python 2.7',
'64bit Amazon Linux 2014.03 v1.1.0 running Python',
'32bit Amazon Linux 2014.03 v1.1.0 running Python 2.7',
'32bit Amazon Linux 2014.03 v1.1.0 running Python',
'64bit Amazon Linux running Python',
'32bit Amazon Linux running Python',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.3 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.2 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.1 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.0 (Puma)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.3 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.2 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.1 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 2.0 (Passenger Standalone)',
'64bit Amazon Linux 2017.03 v2.4.3 running Ruby 1.9.3',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.2 (Puma)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.2 (Passenger Standalone)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.1 (Puma)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.1 (Passenger Standalone)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.0 (Puma)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 2.0 (Passenger Standalone)',
'64bit Amazon Linux 2015.03 v1.4.6 running Ruby 1.9.3',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.1 (Puma)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.1 (Passenger Standalone)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.0 (Puma)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 2.0 (Passenger Standalone)',
'64bit Amazon Linux 2014.03 v1.1.0 running Ruby 1.9.3',
'32bit Amazon Linux 2014.03 v1.1.0 running Ruby 1.9.3',
'64bit Amazon Linux 2017.03 v2.6.3 running Tomcat 8 Java 8',
'64bit Amazon Linux 2017.03 v2.6.3 running Tomcat 7 Java 7',
'64bit Amazon Linux 2017.03 v2.6.3 running Tomcat 7 Java 6',
'64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8',
'64bit Amazon Linux 2015.03 v1.4.5 running Tomcat 8 Java 8',
'64bit Amazon Linux 2015.03 v1.4.5 running Tomcat 7 Java 7',
'64bit Amazon Linux 2015.03 v1.4.5 running Tomcat 7 Java 6',
'64bit Amazon Linux 2015.03 v1.4.4 running Tomcat 7 Java 7',
'64bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 7',
'64bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 6',
'32bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 7',
'32bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 6',
'64bit Amazon Linux running Tomcat 7',
'64bit Amazon Linux running Tomcat 6',
'32bit Amazon Linux running Tomcat 7',
'32bit Amazon Linux running Tomcat 6',
'64bit Windows Server Core 2012 R2 running IIS 8.5',
'64bit Windows Server 2012 R2 running IIS 8.5',
'64bit Windows Server 2012 running IIS 8',
'64bit Windows Server 2008 R2 running IIS 7.5',
'64bit Amazon Linux 2017.03 v2.7.2 running Docker 17.03.1-ce',
'64bit Amazon Linux 2017.03 v2.7.1 running Docker 17.03.1-ce',
'64bit Amazon Linux 2017.03 v2.6.0 running Docker 1.12.6',
'64bit Amazon Linux 2016.09 v2.3.0 running Docker 1.11.2',
'64bit Amazon Linux 2016.03 v2.1.6 running Docker 1.11.2',
'64bit Amazon Linux 2016.03 v2.1.0 running Docker 1.9.1',
'64bit Amazon Linux 2015.09 v2.0.6 running Docker 1.7.1',
'64bit Amazon Linux 2015.03 v1.4.6 running Docker 1.6.2',
'64bit Amazon Linux 2017.03 v2.7.3 running Multi-container Docker 17.03.1-ce (Generic)',
'64bit Amazon Linux 2016.03 v2.1.6 running Multi-container Docker 1.11.2 (Generic)',
'64bit Amazon Linux 2016.03 v2.1.0 running Multi-container Docker 1.9.1 (Generic)',
'64bit Amazon Linux 2015.03 v1.4.6 running Multi-container Docker 1.6.2 (Generic)',
'64bit Debian jessie v2.7.2 running GlassFish 4.1 Java 8 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running GlassFish 4.0 Java 7 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running GlassFish 4.1 Java 8 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running GlassFish 4.0 Java 7 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running Go 1.4 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running Go 1.3 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running Go 1.4 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running Go 1.3 (Preconfigured - Docker)',
'64bit Debian jessie v2.7.2 running Python 3.4 (Preconfigured - Docker)',
'64bit Debian jessie v1.4.6 running Python 3.4 (Preconfigured - Docker)',
'64bit Amazon Linux 2017.03 v2.5.1 running Go 1.8',
'64bit Amazon Linux 2016.09 v2.3.3 running Go 1.6',
'64bit Amazon Linux 2016.09 v2.3.0 running Go 1.5',
'64bit Amazon Linux 2016.03 v2.1.0 running Go 1.4',
'64bit Amazon Linux 2017.03 v2.3.1 running Packer 1.0.3',
'64bit Amazon Linux 2017.03 v2.2.2 running Packer 1.0.0',
]
solution_stacks = [SolutionStack(solution_stack) for solution_stack in solution_stacks]
custom_platforms = [
'arn:aws:elasticbeanstalk:us-west-2:12345678:platform/custom-platform-1/1.0.0',
]
solution_stack_lister_mock.return_value = solution_stacks
custom_platforms_lister_mock.return_value = custom_platforms
for solution_string in solution_strings:
solution_stack_ops.find_solution_stack_from_string(solution_string)
@mock.patch('ebcli.lib.elasticbeanstalk.get_available_solution_stacks')
@mock.patch('ebcli.operations.platform_version_ops.list_custom_platform_versions')
def test_find_solution_stack_from_string__custom_platform(
self,
custom_platforms_lister_mock,
solution_stack_lister_mock
):
solution_stack_lister_mock.return_value = []
custom_platforms_lister_mock.return_value = [
'arn:aws:elasticbeanstalk:us-west-2:12345678:platform/custom-platform-1/1.0.0',
]
custom_platform_inputs = [
'arn:aws:elasticbeanstalk:us-west-2:12345678:platform/custom-platform-1/1.0.0',
'custom-platform-1'
]
for custom_platform_input in custom_platform_inputs:
self.assertEqual(
PlatformVersion('arn:aws:elasticbeanstalk:us-west-2:12345678:platform/custom-platform-1/1.0.0'),
solution_stack_ops.find_solution_stack_from_string(custom_platform_input)
)
@mock.patch('ebcli.lib.elasticbeanstalk.get_available_solution_stacks')
@mock.patch('ebcli.operations.solution_stack_ops.platform_arn_to_solution_stack')
def test_find_solution_stack_from_string__eb_managed_platform(
self,
platform_arn_to_solution_stack_mock,
solution_stack_lister_mock
):
solution_stack_lister_mock.return_value = [
'64bit Amazon Linux 2017.09 v2.7.1 running Tomcat 8 Java 8'
]
platform_arn_to_solution_stack_mock.return_value = SolutionStack(
'64bit Amazon Linux 2017.09 v2.7.1 running Tomcat 8 Java 8'
)
self.assertEqual(
SolutionStack('64bit Amazon Linux 2017.09 v2.7.1 running Tomcat 8 Java 8'),
solution_stack_ops.find_solution_stack_from_string(
'arn:aws:elasticbeanstalk:us-west-2::platform/Tomcat 8 with Java 8 running on 64bit Amazon Linux/2.7.1'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.get_available_solution_stacks')
def test_find_solution_stack_from_string__retrieves_latest(self, solution_stacks_retriever_mock):
solution_stacks = [
SolutionStack('64bit Amazon Linux 2017.03 v4.2.1 running Node.js'),
SolutionStack('64bit Amazon Linux 2017.03 v4.2.0 running Node.js')
]
solution_stacks_retriever_mock.return_value = solution_stacks
self.assertEqual(
SolutionStack('64bit Amazon Linux 2017.03 v4.2.1 running Node.js'),
solution_stack_ops.find_solution_stack_from_string(
'64bit Amazon Linux 2017.03 v4.2.0 running Node.js',
find_newer=True
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.get_available_solution_stacks')
    def test_find_solution_stack_from_string__retrieves_latest_python_solution_stack(self, solution_stacks_retriever_mock):
solution_stacks = [
SolutionStack('64bit Amazon Linux 2014.09 v1.1.0 running Python 2.7'),
SolutionStack('64bit Amazon Linux 2014.09 v1.1.0 running Python 3.6')
]
solution_stacks_retriever_mock.return_value = solution_stacks
self.assertEqual(
SolutionStack('64bit Amazon Linux 2014.09 v1.1.0 running Python 2.7'),
solution_stack_ops.find_solution_stack_from_string(
'Python 2.7',
find_newer=True
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.get_available_solution_stacks')
@mock.patch('ebcli.operations.platform_version_ops.get_latest_custom_platform_version')
def test_find_solution_stack_from_string__return_latest_custom_platform(
self,
get_latest_custom_platform_version_mock,
available_solution_stacks_mock
):
available_solution_stacks_mock.return_value = []
latest_custom_platform_arn = 'arn:aws:elasticbeanstalk:us-west-2:123123123:platform/custom-platform-2/1.0.3'
get_latest_custom_platform_version_mock.return_value = PlatformVersion(latest_custom_platform_arn)
self.assertEqual(
PlatformVersion(latest_custom_platform_arn),
solution_stack_ops.find_solution_stack_from_string(
latest_custom_platform_arn,
find_newer=True
)
)
def test_get_default_solution_stack(self):
ebcli_root = os.getcwd()
test_dir = 'testDir'
os.mkdir(test_dir)
os.mkdir(os.path.join(test_dir, '.elasticbeanstalk'))
os.chdir(test_dir)
with open(os.path.join('.elasticbeanstalk', 'config.yml'), 'w') as config_yml:
config_yml_contents = {
'branch-defaults': {
'default': {
'environment': 'default-environment'
}
},
'global': {
'application_name': 'default-application',
'default_platform': 'Python 3.6'
}
}
yaml.dump(config_yml_contents, config_yml)
config_yml.close()
try:
self.assertEqual(
'Python 3.6',
solution_stack_ops.get_default_solution_stack()
)
finally:
os.chdir(ebcli_root)
shutil.rmtree(test_dir)
@mock.patch('ebcli.lib.utils.prompt_for_index_in_list')
def test_prompt_for_solution_stack_version(self, index_prompter_mock):
matching_language_versions = [
{
'PlatformShorthand': 'Tomcat 8 Java 8',
'LanguageName': 'Tomcat',
'SolutionStack': '64bit Amazon Linux 2017.09 v2.7.0 running Tomcat 8 Java 8'
},
{
'PlatformShorthand': 'Tomcat 7 Java 7',
'LanguageName': 'Tomcat',
'SolutionStack': '64bit Amazon Linux 2017.09 v2.7.0 running Tomcat 7 Java 7'
},
{
'PlatformShorthand': 'Tomcat 7 Java 6',
'LanguageName': 'Tomcat',
'SolutionStack': '64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 7 Java 6'
}
]
index_prompter_mock.return_value = 2
self.assertEqual(
'Tomcat 7 Java 6',
solution_stack_ops.prompt_for_solution_stack_version(matching_language_versions)
)
def test_resolve_language_version__exactly_one_version_found(self):
matching_language_versions = [
{
'PlatformShorthand': 'Node.js',
'LanguageName': 'Node.js',
'SolutionStack': '64bit Amazon Linux 2017.09 v4.4.0 running Node.js'
}
]
SolutionStack.group_solution_stacks_by_platform_shorthand = mock.MagicMock(return_value=matching_language_versions)
self.assertEqual(
'64bit Amazon Linux 2017.09 v4.4.0 running Node.js',
solution_stack_ops.resolve_language_version(
'Node.js',
[
mock.MagicMock('solution-stack-1'),
mock.MagicMock('solution-stack-2')
]
)
)
@mock.patch('ebcli.operations.solution_stack_ops.prompt_for_solution_stack_version')
def test_resolve_language_version__multiple_versions_found(
self,
solution_stack_prompter_mock
):
matching_language_versions = [
{
'PlatformShorthand': 'Tomcat 8 Java 8',
'LanguageName': 'Tomcat',
'SolutionStack': '64bit Amazon Linux 2017.09 v2.7.0 running Tomcat 8 Java 8'
},
{
'PlatformShorthand': 'Tomcat 7 Java 7',
'LanguageName': 'Tomcat',
'SolutionStack': '64bit Amazon Linux 2017.09 v2.7.0 running Tomcat 7 Java 7'
},
{
'PlatformShorthand': 'Tomcat 7 Java 6',
'LanguageName': 'Tomcat',
'SolutionStack': '64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 7 Java 6'
}
]
solution_stack_prompter_mock.return_value = matching_language_versions[0]['PlatformShorthand']
SolutionStack.group_solution_stacks_by_platform_shorthand = mock.MagicMock(return_value=matching_language_versions)
self.assertEqual(
'64bit Amazon Linux 2017.09 v2.7.0 running Tomcat 8 Java 8',
solution_stack_ops.resolve_language_version(
'Tomcat',
[
mock.MagicMock('solution-stack-1'),
mock.MagicMock('solution-stack-2')
]
)
)
def test_platform_arn_to_solution_stack__custom_platform_arn(self):
platform_arn = 'arn:aws:elasticbeanstalk:us-west-2:123123123:platform/custom-platform-test-test-4/1.0.0'
self.assertIsNone(solution_stack_ops.platform_arn_to_solution_stack(platform_arn))
def test_platform_arn_to_solution_stack__preconfigured_solution_stack_arns(self):
platform_arns = [
'arn:aws:elasticbeanstalk:us-west-2::platform/Docker running on 64bit Amazon Linux/2.8.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Elastic Beanstalk Packer Builder/2.4.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Go 1 running on 64bit Amazon Linux/2.7.1',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 10.0 running on 64bit Windows Server 2016/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 10.0 running on 64bit Windows Server Core 2016/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 7.5 running on 64bit Windows Server 2008 R2/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 7.5 running on 64bit Windows Server 2008 R2/0.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 8 running on 64bit Windows Server 2012/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 8 running on 64bit Windows Server 2012/0.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 8.5 running on 64bit Windows Server 2012 R2/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 8.5 running on 64bit Windows Server 2012 R2/0.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 8.5 running on 64bit Windows Server Core 2012 R2/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/IIS 8.5 running on 64bit Windows Server Core 2012 R2/0.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Java 8 running on 64bit Amazon Linux/2.6.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Multi-container Docker running on 64bit Amazon Linux/2.8.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 32bit Amazon Linux/1.2.1',
'arn:aws:elasticbeanstalk:us-west-2::platform/Node.js running on 32bit Amazon Linux 2014.03/1.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Passenger with Ruby 1.9.3 running on 32bit Amazon Linux/1.2.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Passenger with Ruby 1.9.3 running on 32bit Amazon Linux 2014.03/1.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Passenger with Ruby 2.4 running on 64bit Amazon Linux/2.6.1',
'arn:aws:elasticbeanstalk:us-west-2::platform/Passenger with Ruby 2.4 running on 64bit Amazon Linux/2.6.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.1 running on 64bit Amazon Linux/2.5.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Preconfigured Docker - GlassFish 4.0 with Java 7 running on 64bit Debian/2.8.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Preconfigured Docker - Python 3.4 running on 64bit Debian/2.8.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Puma with Ruby 2.4 running on 64bit Amazon Linux/2.6.1',
'arn:aws:elasticbeanstalk:us-west-2::platform/Python 2.7 running on 64bit Amazon Linux 2014.03/1.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Python 3.4 running on 64bit Amazon Linux/2.6.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Tomcat 7 with Java 7 running on 32bit Amazon Linux 2014.03/1.1.0',
'arn:aws:elasticbeanstalk:us-west-2::platform/Tomcat 8 with Java 8 running on 64bit Amazon Linux/2.7.1',
]
platform_descriptions = [
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.8.0 running Docker 17.06.2-ce'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.4.0 running Packer 1.0.3'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.7.1 running Go 1.9'
},
{
'SolutionStackName': '64bit Windows Server 2016 v1.2.0 running IIS 10.0'
},
{
'SolutionStackName': '64bit Windows Server Core 2016 v1.2.0 running IIS 10.0'
},
{
'SolutionStackName': '64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5'
},
{
'SolutionStackName': '64bit Windows Server 2008 R2 running IIS 7.5'
},
{
'SolutionStackName': '64bit Windows Server 2012 v1.2.0 running IIS 8'
},
{
'SolutionStackName': '64bit Windows Server 2012 running IIS 8'
},
{
'SolutionStackName': '64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5'
},
{
'SolutionStackName': '64bit Windows Server 2012 R2 running IIS 8.5'
},
{
'SolutionStackName': '64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5'
},
{
'SolutionStackName': '64bit Windows Server Core 2012 R2 running IIS 8.5'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.6.0 running Java 8'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.8.0 running Multi-container Docker 17.06.2-ce (Generic)'
},
{
'SolutionStackName': '32bit Amazon Linux 2014.09 v1.2.1 running Node.js'
},
{
'SolutionStackName': '32bit Amazon Linux 2014.03 v1.1.0 running Node.js'
},
{
'SolutionStackName': '32bit Amazon Linux 2014.09 v1.2.0 running Ruby 1.9.3'
},
{
'SolutionStackName': '32bit Amazon Linux 2014.03 v1.1.0 running Ruby 1.9.3'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.6.1 running Ruby 2.4 (Passenger Standalone)'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.6.0 running Ruby 2.4 (Passenger Standalone)'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1'
},
{
'SolutionStackName': '64bit Debian jessie v2.8.0 running GlassFish 4.0 Java 7 (Preconfigured - Docker)'
},
{
'SolutionStackName': '64bit Debian jessie v2.8.0 running Python 3.4 (Preconfigured - Docker)'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.6.1 running Ruby 2.4 (Puma)'
},
{
'SolutionStackName': '64bit Amazon Linux 2014.03 v1.1.0 running Python 2.7'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.6.0 running Python 3.4'
},
{
'SolutionStackName': '32bit Amazon Linux 2014.03 v1.1.0 running Tomcat 7 Java 7'
},
{
'SolutionStackName': '64bit Amazon Linux 2017.09 v2.7.1 running Tomcat 8 Java 8'
},
]
for index in range(0, len(platform_arns)):
with mock.patch('ebcli.lib.elasticbeanstalk.describe_platform_version') as describe_platform_version_mock:
describe_platform_version_mock.return_value = platform_descriptions[index]
self.assertEqual(
SolutionStack(platform_descriptions[index]['SolutionStackName']),
solution_stack_ops.platform_arn_to_solution_stack(platform_arns[index])
)
|
vmware_host.py
|
mkevenaar/SysAdminBoard
| 293 |
141008
|
#!/usr/bin/env python
"""vmware_host - Exports JSON files with CPU and RAM data for VMware ESX hosts
# Requires VMware Python SDK: pyvmomi
# https://github.com/vmware/pyvmomi
# pip install pyvmomi
"""
from credentials import VMWARE_VCENTER_USERNAME
from credentials import VMWARE_VCENTER_PASSWORD
import operator
import time
import json
import logging.config
from pyVim.connect import SmartConnect
from pchelper import collect_properties
from pchelper import get_container_view
import pyVmomi
import ssl
__author__ = '<EMAIL> (<NAME>)'
# =================================SETTINGS======================================
# VCenter Servers
VCENTER_SERVERS = [
{"name": "vcenter", "username": VMWARE_VCENTER_USERNAME, "password": <PASSWORD>},
{"name": "view-vcenter", "username": VMWARE_VCENTER_USERNAME, "password": <PASSWORD>}
]
SAMPLE_INTERVAL = 60
MAX_DATAPOINTS = 30
MAX_HOST_RESULTS = 11
# ===============================================================================
class MonitorJSON:
"""This is a simple class passed to Monitor threads so we can access the current JSON data in that thread"""
def __init__(self):
host_data = [{"name": "----", "status": 0, "cpu": [0, 0, 0, 0], "ram": 0}]
self.json = json.dumps({"hosts": host_data}, indent=4)
self.vcenter_servers = VCENTER_SERVERS
class ESXHost:
all_hosts = [] # Static array containing all hosts
def __init__(self, managed_object_reference, name):
self.managed_object_reference = managed_object_reference
self.name = name
self.status = 0
self.cpu_datapoints = []
self.ram = 0
self.ram_percent = 0
self.relative_weight = 1
self.__class__.all_hosts.append(self) # Add self to static array
def update_relative_weight(self):
"""The relative weight is used to determine how much we want to see the data of this Host."""
self.relative_weight = 1
# Add up all of the historical cpu datapoints (higher CPU = more weight)
for i in self.cpu_datapoints:
self.relative_weight += i
# Multiply by the status value (so VMs with red alarm have most weight)
self.relative_weight *= (self.status * 10)
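    # Worked example (editor's note, values hypothetical): a host with
    # cpu_datapoints [50, 60] and status 2 (yellow) ends up with a relative_weight
    # of (1 + 50 + 60) * (2 * 10) = 2220, so busier / unhealthier hosts sort first.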
@classmethod
def find_by_name(cls, managed_object_reference, name):
for host in cls.all_hosts:
if host.name == name:
return host
# if not found, create one and return it instead
return ESXHost(managed_object_reference, name)
def hostname_from_fqdn(fqdn):
"""Will take a fully qualified domain name and return only the hostname."""
split_fqdn = fqdn.split('.', 1) # Split fqdn at periods, but only bother doing first split
return split_fqdn[0]
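# e.g. hostname_from_fqdn("esx01.example.com") returns "esx01" (editor's example).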
def connect_vcenter(vcenter_server, vcenter_username, vcenter_password):
"""This function will connect to the specified vCenter server."""
logger = logging.getLogger(__name__)
# Disable certificate verification otherwise it will error
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
ssl_context.verify_mode = ssl.CERT_NONE
server_instance = None
try:
logger.debug("Connecting to: " + vcenter_server)
server_instance = SmartConnect(host=vcenter_server, user=vcenter_username,
                                       pwd=vcenter_password, sslContext=ssl_context)
server_instance._stub.connectionPoolTimeout = -1 # Turn the connection timeout off (default 900 sec)
except Exception as error:
logger.error("Error connecting to " + vcenter_server + str(error))
return server_instance
def update_host_data(server):
"""This function is called to update the HOST data for the specified vcenter server"""
logger = logging.getLogger(__name__)
# API: HostSystem -
# https://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.HostSystem.html
# summary.overallStatus: general "health" value: gray, green, red, yellow
# summary.quickStats.overallCpuUsage: Aggregated CPU usage across all cores on the host in MHz.
# summary.quickStats.overallMemoryUsage: Physical memory usage on the host in MB.
# hardware.memorySize: Total available RAM in bytes.
try:
# Fast way of getting Host properties using PropertyCollector
# https://github.com/vmware/pyvmomi/blob/master/docs/vmodl/query/PropertyCollector.rst
logger.debug("Getting PropertyCollector for " + server["name"])
container_view = get_container_view(server["conn"], obj_type=[pyVmomi.vim.HostSystem])
query = [
"name",
"summary.overallStatus",
"summary.quickStats.overallCpuUsage",
"summary.quickStats.overallMemoryUsage",
"hardware.memorySize"
]
props = collect_properties(server["conn"], container_view,
pyVmomi.vim.HostSystem, query, include_mors=True)
except Exception as error:
logger.error("Error collecting VMware Host data from " + server["name"] + str(error))
raise
# Loop through all of the ESX servers in props
for prop_set in props:
host_mor = prop_set["obj"] # Managed object reference
host_name = prop_set["name"]
host_name = hostname_from_fqdn(host_name) # trim out the domain name
host_status = prop_set["summary.overallStatus"]
host_cpu = prop_set["summary.quickStats.overallCpuUsage"]
host_ram = prop_set["summary.quickStats.overallMemoryUsage"]
host_ram_max = prop_set["hardware.memorySize"]
host_ram_max = int(host_ram_max / 1024 / 1024) # Convert to Megabytes to match overallMemoryUsage
host_ram_percent = int((host_ram / host_ram_max) * 100) # Calculate RAM percentage
logger.debug(host_name + " RAM: " + str(host_ram_percent) + "% CPU: " + str(host_cpu))
        # Convert RAM from MB into whole Gigabytes (integer division)
host_ram = int(host_ram / 1024)
# Find/Create this host in our list of hosts and update the object's data
host = ESXHost.find_by_name(host_mor, host_name)
if host_status == "green":
host.status = 1
elif host_status == "yellow":
host.status = 2
elif host_status == "red":
host.status = 3
else:
host.status = 0
# Add the raw data to the ESXHost object
# For RAM datapoints, we want to do a bar graph, so only include the current value
host.ram = host_ram
host.ram_percent = host_ram_percent
# For CPU datapoints, we want to do a line graph, so we need a history
if len(host.cpu_datapoints) == 0: # first time through, initialize the list
host.cpu_datapoints = [host_cpu]
else:
host.cpu_datapoints.append(host_cpu)
# If we already have the max number of datapoints in our list, delete the oldest item
if len(host.cpu_datapoints) >= MAX_DATAPOINTS:
del(host.cpu_datapoints[0])
# Update ranking value of this Host to determine if we should show it
host.update_relative_weight()
#
#
#
#
def generate_json(vmware_monitor):
"""This is the main function. It will connect to the vCenter server, obtain perf data and output files"""
logger = logging.getLogger("vmware_host")
# Process each vcenter server
for server in vmware_monitor.vcenter_servers:
logger.debug("Starting " + server["name"])
if "conn" not in server:
logger.debug(server["name"] + " not currently connected.")
server["conn"] = connect_vcenter(server["name"], server["username"], server["password"])
if server["conn"] is None:
logger.warning("Unable to connect to " + server["name"] + " will retry in " +
str(SAMPLE_INTERVAL) + " seconds.")
vmware_monitor.json = json.dumps({"vms": [{"error": "Unable to connect to " + server["name"] +
" will retry in " + str(SAMPLE_INTERVAL) +
" seconds."}]}, indent=4)
return vmware_monitor # Could not connect so return
# Final test if we're connected
try:
if server["conn"].content.sessionManager.currentSession:
logger.debug("Connected to " + server["name"] + " at " +
str(server["conn"].content.sessionManager.currentSession.loginTime))
except Exception as error:
logger.error("Final test: Error connecting to " + server["name"] + str(error))
vmware_monitor.json = json.dumps({"vms": [{"error": "Unable to connect to " + server["name"] +
" will retry in " + str(SAMPLE_INTERVAL) +
" seconds."}]}, indent=4)
return vmware_monitor # Could not connect so return
# Update all the ESX host objects for the specified vCenter server
try:
update_host_data(server)
except Exception as error:
logger.error("Error updating data from " + server["name"] + str(error))
vmware_monitor.json = json.dumps({"vms": [{"error": "Error updating data from " + server["name"] +
" will retry in " + str(SAMPLE_INTERVAL) +
" seconds."}]}, indent=4)
return vmware_monitor
# Sort by relative weight
ESXHost.all_hosts.sort(key=operator.attrgetter('relative_weight'), reverse=True)
# We have all the data we need, so format and set output
host_data = []
for i, host in enumerate(ESXHost.all_hosts):
# Generate the data sequence
host_data.append({
"name": host.name,
"status": host.status,
"cpu": host.cpu_datapoints,
"ram": host.ram,
"ram_percent": host.ram_percent
})
if i >= (MAX_HOST_RESULTS - 1): # Don't return more hosts than we need
break
vmware_monitor.json = json.dumps({"hosts": host_data})
if __debug__:
logger.debug(vmware_monitor.json)
# ======================================================
# __main__
#
# If you run this module by itself, it will instantiate
# the MonitorJSON class and start an infinite loop
# printing data.
# ======================================================
#
if __name__ == '__main__':
# When run by itself, we need to create the logger object (which is normally created in webserver.py)
try:
f = open("log_settings.json", 'rt')
log_config = json.load(f)
f.close()
logging.config.dictConfig(log_config)
except FileNotFoundError as e:
print("Log configuration file not found: " + str(e))
logging.basicConfig(level=logging.DEBUG) # fallback to basic settings
except json.decoder.JSONDecodeError as e:
print("Error parsing logger config file: " + str(e))
raise
monitor = MonitorJSON()
while True:
main_logger = logging.getLogger(__name__)
generate_json(monitor)
# Wait X seconds for the next iteration
main_logger.debug("Waiting for " + str(SAMPLE_INTERVAL) + " seconds")
time.sleep(SAMPLE_INTERVAL)
|
test/mavsdk_tests/logger_helper.py
|
SaxionMechatronics/Firmware
| 4,224 |
141009
|
<reponame>SaxionMechatronics/Firmware
#!/usr/bin/env python3
import re
import sys
import os
from enum import Enum
from functools import lru_cache
class color(Enum):
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
GRAY = '\033[90m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RESET = '\033[0m'
def colorize(text: str, c: color) -> str:
if _supports_color():
return str(c.value) + text + color.RESET.value
else:
return text
def maybe_strip_color(text: str) -> str:
"""Remove ANSI and xterm256 color codes.
From https://stackoverflow.com/a/30500866/8548472
"""
if not _supports_color():
return re.sub(r'\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))', '', text)
else:
return text
@lru_cache()
def _supports_color() -> bool:
"""Returns True if the running system's terminal supports color.
From https://stackoverflow.com/a/22254892/8548472
"""
supported_platform = \
(sys.platform != 'Pocket PC') and \
(sys.platform != 'win32' or 'ANSICON' in os.environ)
# isatty is not always implemented.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
return supported_platform and is_a_tty
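# Hedged usage sketch (assumes stdout is a colour-capable TTY):
#
#   print(colorize("all tests passed", color.GREEN))
#   print(maybe_strip_color("\x1b[92mpassed\x1b[0m"))  # unchanged on a colour TTY, stripped otherwise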
|
src/cocoa/toga_cocoa/widgets/internal/data.py
|
freespace/toga
| 1,261 |
141034
|
from toga_cocoa.libs import NSObject, objc_method
class TogaData(NSObject):
@objc_method
def copyWithZone_(self):
# TogaData is used as an immutable reference to a row
# so the same object can be returned as a copy.
self.retain()
return self
|
examples/pawn.py
|
TonyZYT2000/vapory
| 376 |
141096
|
""" Just a purple sphere """
from vapory import *
objects = [
# SUN
LightSource([1500,2500,-2500], 'color',1),
# SKY
Sphere( [0,0,0],1, 'hollow',
Texture(
Pigment( 'gradient', [0,1,0],
'color_map{[0 color White] [1 color Blue ]}'
'quick_color', 'White'
),
Finish( 'ambient', 1, 'diffuse', 0)
),
'scale', 10000
),
# GROUND
Plane( [0,1,0], 0 ,
Texture( Pigment( 'color', [1.1*e for e in [0.80,0.55,0.35]])),
Normal( 'bumps', 0.75, 'scale', 0.035),
Finish( 'phong', 0.1 )
),
# PAWN
Union( Sphere([0,1,0],0.35),
Cone([0,0,0],0.5,[0,1,0],0.0),
Texture( Pigment( 'color', [1,0.65,0])),
Finish( 'phong', 0.5)
)
]
scene = Scene( Camera( 'ultra_wide_angle',
'angle',45,
'location',[0.0 , 0.6 ,-3.0],
'look_at', [0.0 , 0.6 , 0.0]
),
objects= objects,
included=['colors.inc']
)
scene.render('pawn.png', remove_temp=False)
|
Reverse Engineering/resources/scripts/Install Commonly Used Utilities/debugserver.py
|
bzxy/cydia
| 678 |
141097
|
#!/usr/bin/env python
import subprocess
import string
import os
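# Assumes Xcode with iOS DeviceSupport images is installed and that an entitlements
# file named debugsrvEntitle.xml (referenced below) exists in the current directory.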
Path = subprocess.check_output(['xcode-select', '-p'])
Path = Path + "/Platforms/iPhoneOS.platform/DeviceSupport"
Path = string.replace(Path, "\n", "")
print "Developer Images Located At:", Path
for Sub in os.listdir(Path):
SubDire = os.path.join(Path, Sub)
if (os.path.isdir(SubDire)):
print "Loading Developer Disk Image At:\n",SubDire
os.system("hdiutil mount "+"\""+SubDire+"\""+"/DeveloperDiskImage.dmg -mountpoint /Volumes/DeveloperDiskImage")
DebugSrvPath=SubDire+"/debugserver"
print DebugSrvPath
print "Current DebugServer Copied To:\n"+DebugSrvPath
os.system("cp /Volumes/DeveloperDiskImage/usr/bin/debugserver "+"\""+SubDire+"\"")
os.system("codesign -s - --entitlements ./debugsrvEntitle.xml -f "+"\""+DebugSrvPath+"\"")
Version=os.path.basename(os.path.normpath(SubDire))#Thanks http://stackoverflow.com/questions/3925096/how-to-get-only-the-last-part-of-a-path-in-python
print "Version For Current DebugServer:\n"+Version
os.system("cp "+"\""+DebugSrvPath+"\""+" ./DebugServer"+"\""+Version+"\"")
print "Unloading Developer Disk Image At:\n",SubDire
os.system("hdiutil unmount /Volumes/DeveloperDiskImage")
os.remove(DebugSrvPath)
|
boto3_type_annotations/boto3_type_annotations/route53domains/client.py
|
cowboygneox/boto3_type_annotations
| 119 |
141099
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def check_domain_availability(self, DomainName: str, IdnLangCode: str = None) -> Dict:
pass
def check_domain_transferability(self, DomainName: str, AuthCode: str = None) -> Dict:
pass
def delete_tags_for_domain(self, DomainName: str, TagsToDelete: List) -> Dict:
pass
def disable_domain_auto_renew(self, DomainName: str) -> Dict:
pass
def disable_domain_transfer_lock(self, DomainName: str) -> Dict:
pass
def enable_domain_auto_renew(self, DomainName: str) -> Dict:
pass
def enable_domain_transfer_lock(self, DomainName: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_contact_reachability_status(self, domainName: str = None) -> Dict:
pass
def get_domain_detail(self, DomainName: str) -> Dict:
pass
def get_domain_suggestions(self, DomainName: str, SuggestionCount: int, OnlyAvailable: bool) -> Dict:
pass
def get_operation_detail(self, OperationId: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_domains(self, Marker: str = None, MaxItems: int = None) -> Dict:
pass
def list_operations(self, SubmittedSince: datetime = None, Marker: str = None, MaxItems: int = None) -> Dict:
pass
def list_tags_for_domain(self, DomainName: str) -> Dict:
pass
def register_domain(self, DomainName: str, DurationInYears: int, AdminContact: Dict, RegistrantContact: Dict, TechContact: Dict, IdnLangCode: str = None, AutoRenew: bool = None, PrivacyProtectAdminContact: bool = None, PrivacyProtectRegistrantContact: bool = None, PrivacyProtectTechContact: bool = None) -> Dict:
pass
def renew_domain(self, DomainName: str, CurrentExpiryYear: int, DurationInYears: int = None) -> Dict:
pass
def resend_contact_reachability_email(self, domainName: str = None) -> Dict:
pass
def retrieve_domain_auth_code(self, DomainName: str) -> Dict:
pass
def transfer_domain(self, DomainName: str, DurationInYears: int, AdminContact: Dict, RegistrantContact: Dict, TechContact: Dict, IdnLangCode: str = None, Nameservers: List = None, AuthCode: str = None, AutoRenew: bool = None, PrivacyProtectAdminContact: bool = None, PrivacyProtectRegistrantContact: bool = None, PrivacyProtectTechContact: bool = None) -> Dict:
pass
def update_domain_contact(self, DomainName: str, AdminContact: Dict = None, RegistrantContact: Dict = None, TechContact: Dict = None) -> Dict:
pass
def update_domain_contact_privacy(self, DomainName: str, AdminPrivacy: bool = None, RegistrantPrivacy: bool = None, TechPrivacy: bool = None) -> Dict:
pass
def update_domain_nameservers(self, DomainName: str, Nameservers: List, FIAuthKey: str = None) -> Dict:
pass
def update_tags_for_domain(self, DomainName: str, TagsToUpdate: List = None) -> Dict:
pass
def view_billing(self, Start: datetime = None, End: datetime = None, Marker: str = None, MaxItems: int = None) -> Dict:
pass
|
code/utils.py
|
kongmoumou/BootEA
| 131 |
141107
|
<reponame>kongmoumou/BootEA<gh_stars>100-1000
import numpy as np
import time
from triples import Triples
def read_input(folder):
triples_set1 = read_triples(folder + 'triples_1')
triples_set2 = read_triples(folder + 'triples_2')
triples1 = Triples(triples_set1)
triples2 = Triples(triples_set2)
total_ent_num = len(triples1.ents | triples2.ents)
total_rel_num = len(triples1.props | triples2.props)
total_triples_num = len(triples1.triple_list) + len(triples2.triple_list)
print('total ents:', total_ent_num)
print('total rels:', len(triples1.props), len(triples2.props), total_rel_num)
print('total triples: %d + %d = %d' % (len(triples1.triples), len(triples2.triples), total_triples_num))
ref_ent1, ref_ent2 = read_references(folder + 'ref_ent_ids')
assert len(ref_ent1) == len(ref_ent2)
print("To aligned entities:", len(ref_ent1))
sup_ent1, sup_ent2 = read_references(folder + 'sup_ent_ids')
return triples1, triples2, sup_ent1, sup_ent2, ref_ent1, ref_ent2, total_triples_num, total_ent_num, total_rel_num
def read_dbp15k_input(folder):
triples_set1 = read_triples(folder + 'triples_1')
triples_set2 = read_triples(folder + 'triples_2')
triples1 = Triples(triples_set1)
triples2 = Triples(triples_set2)
total_ent_num = len(triples1.ents | triples2.ents)
total_rel_num = len(triples1.props | triples2.props)
total_triples_num = len(triples1.triple_list) + len(triples2.triple_list)
print('total ents:', total_ent_num)
print('total rels:', len(triples1.props), len(triples2.props), total_rel_num)
print('total triples: %d + %d = %d' % (len(triples1.triples), len(triples2.triples), total_triples_num))
ref_ent1, ref_ent2 = read_references(folder + 'ref_pairs')
assert len(ref_ent1) == len(ref_ent2)
print("To aligned entities:", len(ref_ent1))
sup_ent1, sup_ent2 = read_references(folder + 'sup_pairs')
return triples1, triples2, sup_ent1, sup_ent2, ref_ent1, ref_ent2, total_triples_num, total_ent_num, total_rel_num
def generate_sup_triples(triples1, triples2, ents1, ents2):
def generate_newly_triples(ent1, ent2, rt_dict1, hr_dict1):
newly_triples = set()
for r, t in rt_dict1.get(ent1, set()):
newly_triples.add((ent2, r, t))
for h, r in hr_dict1.get(ent1, set()):
newly_triples.add((h, r, ent2))
return newly_triples
assert len(ents1) == len(ents2)
newly_triples1, newly_triples2 = set(), set()
for i in range(len(ents1)):
newly_triples1 |= (generate_newly_triples(ents1[i], ents2[i], triples1.rt_dict, triples1.hr_dict))
newly_triples2 |= (generate_newly_triples(ents2[i], ents1[i], triples2.rt_dict, triples2.hr_dict))
print("supervised triples: {}, {}".format(len(newly_triples1), len(newly_triples2)))
return newly_triples1, newly_triples2
def add_sup_triples(triples1, triples2, sup_ent1, sup_ent2):
newly_triples1, newly_triples2 = generate_sup_triples(triples1, triples2, sup_ent1, sup_ent2)
triples1 = Triples(triples1.triples | newly_triples1, ori_triples=triples1.triples)
triples2 = Triples(triples2.triples | newly_triples2, ori_triples=triples2.triples)
print("now triples: {}, {}".format(len(triples1.triples), len(triples2.triples)))
return triples1, triples2
def pair2file(file, pairs):
with open(file, 'w', encoding='utf8') as f:
for i, j in pairs:
f.write(str(i) + '\t' + str(j) + '\n')
f.close()
def read_triples(file):
triples = set()
with open(file, 'r', encoding='utf8') as f:
for line in f:
params = line.strip('\n').split('\t')
assert len(params) == 3
h = int(params[0])
r = int(params[1])
t = int(params[2])
triples.add((h, r, t))
f.close()
return triples
def read_references(file):
ref1, ref2 = list(), list()
with open(file, 'r', encoding='utf8') as f:
for line in f:
params = line.strip('\n').split('\t')
assert len(params) == 2
e1 = int(params[0])
e2 = int(params[1])
ref1.append(e1)
ref2.append(e2)
f.close()
assert len(ref1) == len(ref2)
return ref1, ref2
def div_list(ls, n):
ls_len = len(ls)
if n <= 0 or 0 == ls_len:
return []
if n > ls_len:
return []
elif n == ls_len:
return [[i] for i in ls]
else:
j = ls_len // n
k = ls_len % n
ls_return = []
for i in range(0, (n - 1) * j, j):
ls_return.append(ls[i:i + j])
ls_return.append(ls[(n - 1) * j:])
return ls_return
def triples2ht_set(triples):
ht_set = set()
for h, r, t in triples:
ht_set.add((h, t))
print("the number of ht: {}".format(len(ht_set)))
return ht_set
def merge_dic(dic1, dic2):
return {**dic1, **dic2}
def generate_adjacency_mat(triples1, triples2, ent_num, sup_ents):
adj_mat = np.mat(np.zeros((ent_num, len(sup_ents)), dtype=np.int32))
ht_set = triples2ht_set(triples1) | triples2ht_set(triples2)
for i in range(ent_num):
for j in sup_ents:
if (i, j) in ht_set:
adj_mat[i, sup_ents.index(j)] = 1
print("shape of adj_mat: {}".format(adj_mat.shape))
print("the number of 1 in adjacency matrix: {}".format(np.count_nonzero(adj_mat)))
return adj_mat
def generate_adj_input_mat(adj_mat, d):
W = np.random.randn(adj_mat.shape[1], d)
M = np.matmul(adj_mat, W)
print("shape of input adj_mat: {}".format(M.shape))
return M
def generate_ent_attrs_sum(ent_num, ent_attrs1, ent_attrs2, attr_embeddings):
t1 = time.time()
ent_attrs_embeddings = None
for i in range(ent_num):
attrs_index = list(ent_attrs1.get(i, set()) | ent_attrs2.get(i, set()))
assert len(attrs_index) > 0
attrs_embeds = np.sum(attr_embeddings[attrs_index,], axis=0)
if ent_attrs_embeddings is None:
ent_attrs_embeddings = attrs_embeds
else:
ent_attrs_embeddings = np.row_stack((ent_attrs_embeddings, attrs_embeds))
print("shape of ent_attr_embeds: {}".format(ent_attrs_embeddings.shape))
print("generating ent features costs: {:.3f} s".format(time.time() - t1))
return ent_attrs_embeddings
|
opendatatools/nhindex/__init__.py
|
solider245/OpenData
| 1,179 |
141109
|
# encoding: utf-8
from .nhindex_interface import *
|
python/IECoreMaya/FnParameterisedHolder.py
|
bradleyhenke/cortex
| 386 |
141110
|
##########################################################################
#
# Copyright (c) 2008-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import warnings
import maya.OpenMaya
import maya.cmds
import IECore
import _IECoreMaya
import StringUtil
## A function set for operating on the various IECoreMaya::ParameterisedHolder
# types. This allows setting and getting of plug and parameter values, and
# setting and getting of the Parameterised object being held.
class FnParameterisedHolder( maya.OpenMaya.MFnDependencyNode ) :
## Initialise the function set for the given object, which may
# either be an MObject or a node name in string or unicode form.
def __init__( self, object ) :
if isinstance( object, str ) or isinstance( object, unicode ) :
object = StringUtil.dependencyNodeFromString( object )
maya.OpenMaya.MFnDependencyNode.__init__( self, object )
## Sets the IECore.Parameterised object held by the node. This function can be called
# in two ways :
#
# setParameterised( Parameterised p )
# Directly sets the held object to the Parameterised instance p. Note that this
# form doesn't provide enough information for the node to be reinstantiated
# after saving and reloading of the maya scene - see the form below for that.
# Also note that this form is not undoable, and that the undoable parameter will therefore
# be ignored.
#
# setParameterised( string className, int classVersion, string searchPathEnvVar, bool undoable )
# Sets the held object by specifying a class that will be loaded using the IECore.ClassLoader.
# searchPathEnvVar specifies an environment variable which holds a colon separated search path for the
# ClassLoader. This form allows the held class to be reinstantiated across scene save/load, and is
# also undoable if requested using the undoable parameter. If classVersion is omitted, None, or negative,
# then the highest available version will be used.
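	# A hedged usage sketch (the node and class names here are hypothetical, and
	# IECORE_OP_PATHS is assumed to be an environment variable configured for the
	# IECore.ClassLoader) :
	#
	#	fnPH = FnParameterisedHolder( "myHolderNode" )
	#	fnPH.setParameterised( "ops/noise", None, "IECORE_OP_PATHS", undoable=True )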
def setParameterised( self, classNameOrParameterised, classVersion=None, envVarName=None, undoable=True ) :
if isinstance( classNameOrParameterised, str ) :
if classVersion is None or classVersion < 0 :
classVersions = IECore.ClassLoader.defaultLoader( envVarName ).versions( classNameOrParameterised )
classVersion = classVersions[-1] if classVersions else 0
if undoable :
if self.getParameterised()[0] :
self.setParameterisedValues()
_IECoreMaya._parameterisedHolderAssignModificationState(
self.getParameterised()[0].parameters().getValue().copy(),
self._classParameterStates(),
None,
None
)
else :
_IECoreMaya._parameterisedHolderAssignModificationState( None, None, None, None )
maya.cmds.ieParameterisedHolderModification( self.fullPathName(), classNameOrParameterised, classVersion, envVarName )
# no need to despatch callbacks as that is done by the command, so that the callbacks happen on undo and redo too.
else :
_IECoreMaya._parameterisedHolderSetParameterised( self, classNameOrParameterised, classVersion, envVarName )
self._despatchSetParameterisedCallbacks( self.fullPathName() )
else :
result = _IECoreMaya._parameterisedHolderSetParameterised( self, classNameOrParameterised )
self._despatchSetParameterisedCallbacks( self.fullPathName() )
## Returns a tuple of the form (parameterised, className, classVersion, searchPathEnvVar).
# The returned parameterised object is not guaranteed to be in sync with the plug values.
# Use setParameterisedValues function if you need that.
def getParameterised( self ) :
return _IECoreMaya._parameterisedHolderGetParameterised( self )
## Returns a context manager for use with the with statement. This can be used to
# scope edits to Parameter values (including the classes held by ClassParameters and
# ClassVectorParameters) in such a way that they are automatically transferred onto
# the maya attributes and furthermore in an undoable fashion.
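	# A hedged sketch of typical use (the "radius" parameter is hypothetical) :
	#
	#	with fnPH.parameterModificationContext() as parameterised :
	#		parameterised.parameters()["radius"].setNumericValue( 2.0 )
	#	# on exiting the block the new value is transferred onto the maya attributes undoably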
def parameterModificationContext( self ) :
return _ParameterModificationContext( self )
## Sets the values of the plugs representing the parameterised object,
# using the current values of the parameters. If the undoable parameter is True
# then this method is undoable using the standard maya undo mechanism.
# \note If this is applied to a node in a reference, then reference edits will
# be produced for every parameter plug, even if the values are not changing.
# You may prefer to set parameter values within a parameterModificationContext()
# instead as this automatically transfers the values to maya, while also avoiding
# the reference edit problem.
def setNodeValues( self, undoable=True ) :
if undoable :
maya.cmds.ieParameterisedHolderSetValue( self.fullPathName() )
else :
_IECoreMaya._parameterisedHolderSetNodeValues( self )
## Set the value for the plug representing parameter, using the current
# value of the parameter. If the undoable parameter is True
# then this method is undoable using the standard maya undo mechanism.
def setNodeValue( self, parameter, undoable=True ) :
if undoable :
maya.cmds.ieParameterisedHolderSetValue( self.fullPathName(), plug=self.parameterPlug( parameter ).partialName() )
else :
_IECoreMaya._parameterisedHolderSetNodeValue( self, parameter )
## Transfers the values from the plugs of the node onto the
# parameters of the held Parameterised object.
def setParameterisedValues( self ) :
return _IECoreMaya._parameterisedHolderSetParameterisedValues( self )
## Sets the value of parameter from the value held by the plug representing it.
def setParameterisedValue( self, parameter ) :
return _IECoreMaya._parameterisedHolderSetParameterisedValue( self, parameter )
## Returns the OpenMaya.MPlug object responsible for representing the given parameter.
def parameterPlug( self, parameter ) :
plugName = _IECoreMaya._parameterisedHolderParameterPlug( self, parameter )
if plugName == "" :
return maya.OpenMaya.MPlug()
return StringUtil.plugFromString( self.fullPathName() + "." + plugName )
## Returns a string containing a full pathname for the plug representing the given parameter.
def parameterPlugPath( self, parameter ) :
plugName = _IECoreMaya._parameterisedHolderParameterPlug( self, parameter )
if not plugName :
return ""
return self.fullPathName() + "." + plugName
## Returns the IECore.Parameter object being represented by the given fullPathName
# of the maya plug or its OpenMaya.MPlug instance.
def plugParameter( self, plug ) :
if isinstance( plug, str ) or isinstance( plug, unicode ) :
plug = StringUtil.plugFromString( plug )
return _IECoreMaya._parameterisedHolderPlugParameter( self, plug )
## Returns the full path name to this node.
def fullPathName( self ) :
try :
f = maya.OpenMaya.MFnDagNode( self.object() )
return f.fullPathName()
except :
pass
return self.name()
## Add a callback which will be invoked whenever FnParameterisedHolder.setParameterised
# is called. The expected function signature is callback( FnParameterisedHolder ).
@classmethod
def addSetParameterisedCallback( cls, callback ) :
cls.__setParameterisedCallbacks.add( callback )
## Remove a previously added callback.
@classmethod
def removeSetParameterisedCallback( cls, callback ) :
cls.__setParameterisedCallbacks.remove( callback )
__setParameterisedCallbacks = set()
@classmethod
def _despatchSetParameterisedCallbacks( cls, nodeName ) :
fnPH = FnParameterisedHolder( nodeName )
for c in cls.__setParameterisedCallbacks :
c( fnPH )
## Adds a callback which will be invoked whenever FnParameterisedHolder.setClassVectorParameterClasses
# is called. The expected function signature is callback( FnParameterisedHolder, parameter )
@classmethod
def addSetClassVectorParameterClassesCallback( cls, callback ) :
cls.__setClassVectorParameterClassesCallbacks.add( callback )
## Removes a callback added previously with addSetClassVectorParameterClassesCallback()
@classmethod
def removeSetClassVectorParameterClassesCallback( cls, callback ) :
cls.__setClassVectorParameterClassesCallbacks.remove( callback )
__setClassVectorParameterClassesCallbacks = set()
# Invoked by the ieParameterisedHolderModification MPxCommand. It must be invoked from there
# rather than the methods above so that callbacks get correctly despatched during undo and redo.
@classmethod
def _despatchSetClassVectorParameterClassesCallbacks( cls, plugPath ) :
# This function gets called deferred (on idle) from ParameterisedHolderSetClassParameterCmd.cpp.
# Because of the deferred nature of the call, it's possible that the plug has been destroyed before
# we're called - in this case we just don't despatch callbacks.
## \todo It might be better to not defer the call to this function, and have any callbacks which
# need deferred evaluation (the ui callbacks in ClassVectorParameterUI for instance) arrange for that
# themselves.
if not maya.cmds.objExists( plugPath ) :
return
fnPH = FnParameterisedHolder( StringUtil.nodeFromAttributePath( plugPath ) )
parameter = fnPH.plugParameter( plugPath )
for c in cls.__setClassVectorParameterClassesCallbacks :
c( fnPH, parameter )
## Adds a callback which will be invoked whenever FnParameterisedHolder.setClassParameterClass
# is called. The expected function signature is callback( FnParameterisedHolder, parameter )
@classmethod
def addSetClassParameterClassCallback( cls, callback ) :
cls.__setClassParameterClassCallbacks.add( callback )
## Removes a callback added previously with addSetClassParameterClassCallback()
@classmethod
def removeSetClassParameterClassCallback( cls, callback ) :
cls.__setClassParameterClassCallbacks.remove( callback )
__setClassParameterClassCallbacks = set()
# Invoked by the ieParameterisedHolderModification MPxCommand. It must be invoked from there
# rather than the methods above so that callbacks get correctly despatched during undo and redo.
@classmethod
def _despatchSetClassParameterClassCallbacks( cls, plugPath ) :
# See comment in _despatchSetClassVectorParameterClassesCallbacks
if not maya.cmds.objExists( plugPath ) :
return
fnPH = FnParameterisedHolder( StringUtil.nodeFromAttributePath( plugPath ) )
parameter = fnPH.plugParameter( plugPath )
for c in cls.__setClassParameterClassCallbacks :
c( fnPH, parameter )
def _classParameterStates( self, parameter=None, parentParameterPath="", result=None ) :
if result is None :
result = IECore.CompoundData()
if parameter is None :
parameter = self.getParameterised()[0].parameters()
parameterPath = parameter.name
if parentParameterPath :
parameterPath = parentParameterPath + "." + parameterPath
if isinstance( parameter, IECore.ClassParameter ) :
classInfo = parameter.getClass( True )
result[parameterPath] = IECore.CompoundData( {
"className" : IECore.StringData( classInfo[1] ),
"classVersion" : IECore.IntData( classInfo[2] ),
"searchPathEnvVar" : IECore.StringData( classInfo[3] ),
} )
elif isinstance( parameter, IECore.ClassVectorParameter ) :
classInfo = parameter.getClasses( True )
if classInfo :
classInfo = zip( *classInfo )
else :
classInfo = [ [], [], [], [] ]
result[parameterPath] = IECore.CompoundData({
"parameterNames" : IECore.StringVectorData( classInfo[1] ),
"classNames" : IECore.StringVectorData( classInfo[2] ),
"classVersions" : IECore.IntVectorData( classInfo[3] ),
} )
if isinstance( parameter, IECore.CompoundParameter ) :
for c in parameter.values() :
self._classParameterStates( c, parameterPath, result )
return result
## Returns the maya node type that this function set operates on
@classmethod
def _mayaNodeType( cls ):
return "ieParameterisedHolderNode"
## Lists the ieParameterisedHolderNodes in the current scene. The keyword arguments operate as follows :
#
# selection :
# Only list holders in the current selection. Defaults to False
#
# fnSets :
# Returns a list of FnParameterisedHolder instances if True, otherwise returns node names. Defaults to True
#
# classType :
# Python class: if specified, only lists holders holding this class
#
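	# A hedged example (assumes IECore is imported and Op holders exist in the scene) :
	#
	#	for fnPH in FnParameterisedHolder.ls( selection=True, classType=IECore.Op ) :
	#		print fnPH.fullPathName()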
@classmethod
def ls( cls, selection=False, fnSets=True, classType=None ) :
nodeNames = maya.cmds.ls( sl=selection, leaf=True, type=cls._mayaNodeType() )
matches = []
for n in nodeNames :
fnH = cls( n )
if classType is None or isinstance( fnH.getParameterised()[0], classType ) :
matches.append( fnH )
if fnSets :
return matches
else :
return [ x.fullPathName() for x in matches ]
class _ParameterModificationContext :
def __init__( self, fnPH ) :
self.__fnPH = fnPH
def __enter__( self ) :
self.__fnPH.setParameterisedValues()
self.__originalValues = self.__fnPH.getParameterised()[0].parameters().getValue().copy()
self.__originalClasses = self.__fnPH._classParameterStates()
return self.__fnPH.getParameterised()[0]
def __exit__( self, type, value, traceBack ) :
_IECoreMaya._parameterisedHolderAssignModificationState(
self.__originalValues,
self.__originalClasses,
self.__fnPH.getParameterised()[0].parameters().getValue().copy(),
self.__fnPH._classParameterStates(),
)
maya.cmds.ieParameterisedHolderModification( self.__fnPH.fullPathName() )
|
resotocore/tests/resotocore/model/typed_model_test.py
|
someengineering/resoto
| 126 |
141114
|
<filename>resotocore/tests/resotocore/model/typed_model_test.py
import abc
from datetime import datetime
from deepdiff import DeepDiff
from frozendict import frozendict
from resotocore.model.typed_model import from_js, to_js
from resotocore.query.model import Query, P
from resotocore.query.query_parser import parse_query
from resotocore.task.task_description import ExecuteOnCLI
class ModelBase(abc.ABC):
def __init__(self, identity: int):
self.identity = identity
class ModelFoo(ModelBase):
def __init__(self, identity: int, a: str, b: int):
super().__init__(identity)
self.a = a
self.b = b
class ModelBar(ModelFoo):
def __init__(self, identity: int, a: str, b: int, foo: str, bla: datetime):
super().__init__(identity, a, b)
self.foo = foo
self.bla = bla
def test_json_marshalling_works() -> None:
m = ModelFoo(1, "some foo", 23)
js = to_js(m)
js["identity"] = 1
js["a"] = "some foo"
js["b"] = 23
again = from_js(js, ModelFoo)
d = DeepDiff(m, again, truncate_datetime="second")
assert len(d) == 0
def test_ignore_private_properties() -> None:
m = ModelFoo(1, "some foo", 23)
m.__some_private_prop = 23 # type: ignore
m.__some_other_dunder = "foo" # type: ignore
js = to_js(m)
assert len(js) == 3
def test_marshal_query() -> None:
q = Query.by("ec2", P("foo") > 23, P("test") >= "bummer", P("das") < "set")
again = parse_query(str(q))
assert str(q) == str(again)
def test_frozen_dict() -> None:
res = ExecuteOnCLI("test", frozendict({"test": "foo"}))
assert res == from_js(to_js(res), ExecuteOnCLI)
|
PyFin/tests/Analysis/testCrossSectionValueHolders.py
|
rpatil524/Finance-Python
| 325 |
141138
|
<filename>PyFin/tests/Analysis/testCrossSectionValueHolders.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
u"""
Created on 2017-1-6
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from PyFin.Enums import Factors
from PyFin.Analysis.SecurityValueHolders import SecurityLatestValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSRankedSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSTopNSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSBottomNSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSTopNPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSBottomNPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSAverageSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSAverageAdjustedSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSZScoreSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSFillNASecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSResidueSecurityValueHolder
class TestCrossSectionValueHolder(unittest.TestCase):
def setUp(self):
np.random.seed(0)
sample1 = np.random.randn(1000, 6)
sample2 = np.random.randn(1000, 6)
self.datas = {'aapl': {'close': sample1[:, 0], 'open': sample1[:, 1]},
'ibm': {'close': sample2[:, 0], 'open': sample2[:, 1]},
'goog': {'close': sample1[:, 2], 'open': sample1[:, 3]},
'baba': {'close': sample2[:, 2], 'open': sample2[:, 3]},
'tela': {'close': sample1[:, 4], 'open': sample1[:, 5]},
'nflx': {'close': sample2[:, 4], 'open': sample2[:, 5]}
}
def testCSRankedSecurityValueHolderWithSymbolName(self):
benchmark = SecurityLatestValueHolder(x='close')
rankHolder = CSRankedSecurityValueHolder('close')
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values, rankHolder.value.values)
def testCSRankedSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
rankHolder = CSRankedSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values, rankHolder.value.values)
def testCSTopNSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 2
topnHolder = CSTopNSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}}
benchmark.push(data)
topnHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((-benchmarkValues).rank().values <= n, topnHolder.value.values)
def testCSBottomNSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 2
topnHolder = CSBottomNSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}}
benchmark.push(data)
topnHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values <= n, topnHolder.value.values)
def testCSRankedSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
rankHolder = CSRankedSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups).rank().values
np.testing.assert_array_almost_equal(expected_rank, rankHolder.value.values)
def testCSAverageSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
meanHolder = CSAverageSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
meanHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.values.mean(), meanHolder.value.values)
def testCSAverageSecurityValueHolderWithGroup(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanHolder = CSAverageSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_mean = pd.Series(benchmarkValues.to_dict()).groupby(groups).mean()
calculated_mean = meanHolder.value
for name in calculated_mean.index():
if name in ['aapl', 'ibm']:
self.assertAlmostEqual(calculated_mean[name], expected_mean[1])
else:
self.assertAlmostEqual(calculated_mean[name], expected_mean[2])
def testCSPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
perHolder = CSPercentileSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values / len(data), perHolder.value.values)
def testCSTopNPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 0.3
perHolder = CSTopNPercentileSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(((-benchmarkValues).rank().values / len(data)) <= n,
perHolder.value.values)
def testCSBottomNPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 0.3
perHolder = CSBottomNPercentileSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((benchmarkValues.rank().values / len(data)) <= n,
perHolder.value.values)
def testCSPercentileSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
perHolder = CSPercentileSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: x.rank().values / len(x))
np.testing.assert_array_almost_equal(expected_rank, perHolder.value.values)
def testCSAverageAdjustedSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
meanAdjustedHolder = CSAverageAdjustedSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((benchmarkValues - benchmarkValues.mean()).values, meanAdjustedHolder.value.values)
def testCSAverageAdjustedSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSAverageAdjustedSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: x - x.mean())
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZscoreSecurityValueHolder(self):
keys = list(range(1, 11))
values = list(range(10, 0, -1))
data = {}
for i, k in enumerate(keys):
data[k] = {}
data[k]['close'] = values[i]
quantile_value = CSZScoreSecurityValueHolder('close')
quantile_value.push(data)
calculated = quantile_value.value
data = np.linspace(10., 1., 10)
expected = (data - data.mean()) / data.std()
np.testing.assert_array_almost_equal(expected, calculated.values)
def testCSFillNASecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSFillNASecurityValueHolder(benchmark, groups)
def cal_func(x):
x[np.isnan(x)] = np.nanmean(x)
return x
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'tela': {Factors.CLOSE: np.nan,
Factors.OPEN: self.datas['tela'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: np.nan,
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.},
'nflx': {Factors.CLOSE: self.datas['nflx'][Factors.CLOSE][i],
Factors.OPEN: self.datas['nflx'][Factors.OPEN][i],
'ind': 2.}
}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'tela': 1., 'goog': 2., 'baba': 2., 'nflx': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(cal_func)
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZscoreSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSZScoreSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: (x - x.mean()) / x.std(ddof=0))
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZResidueSecurityValueHolder(self):
y = SecurityLatestValueHolder(x='close')
x = SecurityLatestValueHolder(x='open')
res = CSResidueSecurityValueHolder(y, x)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
y.push(data)
x.push(data)
res.push(data)
calculated = res.value.values
y_values = y.value.values
x_values = x.value.values
x_values = np.concatenate([np.ones(shape=(len(x_values), 1)), x_values.reshape(-1, 1)], axis=1)
beta = np.dot(np.linalg.inv(np.dot(x_values.T, x_values)), np.dot(x_values.T, y_values.reshape(-1, 1)))
expected = y_values - np.dot(x_values, beta).flatten()
np.testing.assert_array_almost_equal(calculated, expected)
|
cogdl/data/dataloader.py
|
li-ziang/cogdl
| 1,072 |
141168
|
<reponame>li-ziang/cogdl<filename>cogdl/data/dataloader.py
from abc import ABCMeta
import torch.utils.data
from torch.utils.data.dataloader import default_collate
from cogdl.data import Batch, Graph
try:
from typing import GenericMeta # python 3.6
except ImportError:
# in 3.7, genericmeta doesn't exist but we don't need it
class GenericMeta(type):
pass
class RecordParameters(ABCMeta):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.record_parameters([args, kwargs])
return obj
class GenericRecordParameters(GenericMeta, RecordParameters):
pass
class DataLoader(torch.utils.data.DataLoader, metaclass=GenericRecordParameters):
r"""Data loader which merges data objects from a
:class:`cogdl.data.dataset` to a mini-batch.
Args:
dataset (Dataset): The dataset from which to load the data.
        batch_size (int, optional): How many samples per batch to load.
(default: :obj:`1`)
shuffle (bool, optional): If set to :obj:`True`, the data will be
reshuffled at every epoch (default: :obj:`True`)
"""
def __init__(self, dataset, batch_size=1, shuffle=True, **kwargs):
if "collate_fn" not in kwargs or kwargs["collate_fn"] is None:
kwargs["collate_fn"] = self.collate_fn
super(DataLoader, self).__init__(
dataset,
batch_size,
shuffle,
**kwargs,
)
@staticmethod
def collate_fn(batch):
item = batch[0]
if isinstance(item, Graph):
return Batch.from_data_list(batch)
elif isinstance(item, torch.Tensor):
return default_collate(batch)
elif isinstance(item, float):
return torch.tensor(batch, dtype=torch.float)
raise TypeError("DataLoader found invalid type: {}".format(type(item)))
def get_parameters(self):
return self.default_kwargs
def record_parameters(self, params):
self.default_kwargs = params
|
cliff/tests/test_formatters_csv.py
|
tivaliy/cliff
| 187 |
141170
|
<filename>cliff/tests/test_formatters_csv.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import io
import unittest
from unittest import mock
from cliff.formatters import commaseparated
from cliff.tests import test_columns
class TestCSVFormatter(unittest.TestCase):
def test_commaseparated_list_formatter(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
d2 = ('D', 'E', 'F')
data = [d1, d2]
expected = 'a,b,c\nA,B,C\nD,E,F\n'
output = io.StringIO()
parsed_args = mock.Mock()
parsed_args.quote_mode = 'none'
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
def test_commaseparated_list_formatter_quoted(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
d2 = ('D', 'E', 'F')
data = [d1, d2]
expected = '"a","b","c"\n"A","B","C"\n"D","E","F"\n'
output = io.StringIO()
# Parse arguments as if passed on the command-line
parser = argparse.ArgumentParser(description='Testing...')
sf.add_argument_group(parser)
parsed_args = parser.parse_args(['--quote', 'all'])
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
def test_commaseparated_list_formatter_formattable_column(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', test_columns.FauxColumn(['the', 'value']))
data = [d1]
expected = 'a,b,c\nA,B,[\'the\'\\, \'value\']\n'
output = io.StringIO()
parsed_args = mock.Mock()
parsed_args.quote_mode = 'none'
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
def test_commaseparated_list_formatter_unicode(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
happy = '高兴'
d2 = ('D', 'E', happy)
data = [d1, d2]
expected = 'a,b,c\nA,B,C\nD,E,%s\n' % happy
output = io.StringIO()
parsed_args = mock.Mock()
parsed_args.quote_mode = 'none'
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
|
k2/python/host/tests/properties_test.py
|
EmreOzkose/k2
| 491 |
141175
|
#!/usr/bin/env python3
#
# Copyright 2020 Xiaomi Corporation (author: <NAME>)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R host_properties_test_py
#
import unittest
import torch
import k2host
class TestIsValid(unittest.TestCase):
def test_bad_case1(self):
# fsa should contain at least two states
array_size = k2host.IntArray2Size(1, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertFalse(k2host.is_valid(fsa))
def test_bad_case2(self):
# only kFinalSymbol arcs enter the final state
s = r'''
0 1 0 0
0 2 1 0
1 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_valid(fsa))
def test_bad_case3(self):
# `arc_indexes` and `arcs` in this state are not consistent
arc_indexes = torch.IntTensor([0, 2, 2, 2])
arcs = torch.IntTensor([[0, 1, 0, 0], [0, 2, 1, 0], [1, 2, 0, 0]])
fsa = k2host.Fsa(arc_indexes, arcs)
self.assertFalse(k2host.is_valid(fsa))
def test_good_cases1(self):
# empty fsa is valid
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_valid(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 2 0 0
2 3 -1 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_valid(fsa))
def test_good_case3(self):
s = r'''
0 1 0 0
0 2 -1 0
1 2 -1 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_valid(fsa))
class TestIsTopSorted(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 0 0
0 2 0 0
2 1 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_top_sorted(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_top_sorted(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 2 0 0
1 2 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_top_sorted(fsa))
class TestIsArcSorted(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 1 0
0 2 2 0
1 2 2 0
1 3 1 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_arc_sorted(fsa))
def test_bad_cases2(self):
# same label on two arcs
s = r'''
0 2 0 0
0 1 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_arc_sorted(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_arc_sorted(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 2 0 0
1 2 1 0
1 3 2 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_arc_sorted(fsa))
class TestHasSelfLoops(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 0 0
0 2 0 0
1 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.has_self_loops(fsa))
def test_bad_cases2(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertFalse(k2host.has_self_loops(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
1 2 0 0
1 1 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.has_self_loops(fsa))
class TestIsDeterministic(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 2 0
1 2 0 0
1 3 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_deterministic(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_deterministic(fsa))
def test_good_case2(self):
s = r'''
0 1 2 0
1 2 0 0
1 3 2 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_deterministic(fsa))
class TestIsEpsilonFree(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 2 0
0 2 0 0
1 2 1 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_epsilon_free(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_epsilon_free(fsa))
def test_good_case2(self):
s = r'''
0 1 2 0
0 2 1 0
1 2 1 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_epsilon_free(fsa))
class TestIsConnected(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_connected(fsa))
def test_bad_cases2(self):
s = r'''
0 1 0 0
0 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_connected(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_connected(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 3 0 0
1 2 0 0
2 3 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_connected(fsa))
def test_good_case3(self):
s = r'''
0 3 0 0
1 2 0 0
2 3 0 0
2 3 0 0
2 4 0 0
3 1 0 0
4
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_connected(fsa))
class TestIsAcyclic(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 2 0
0 4 0 0
0 2 0 0
1 2 1 0
1 3 0 0
2 1 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_acyclic(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_acyclic(fsa))
def test_good_case2(self):
s = r'''
0 1 2 0
0 2 1 0
1 2 0 0
1 3 5 0
2 3 6 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_acyclic(fsa))
class TestIsEmpty(unittest.TestCase):
def test_good_cases1(self):
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_empty(fsa))
def test_bad_case1(self):
s = r'''
0 1 2 0
1
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_empty(fsa))
if __name__ == '__main__':
unittest.main()
|
lit_nlp/lib/utils.py
|
eichinflo/lit
| 2,854 |
141180
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Miscellaneous helper functions."""
import copy
import itertools
import queue
import threading
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Sequence, TypeVar, Union
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
def coerce_bool(value) -> bool:
if isinstance(value, (bool, int, float, list, dict)):
return bool(value)
elif value is None:
return False
elif str(value).lower() in ['', '0', 'false']:
return False
else:
return True
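# Illustrative usage (not part of the original module): coerce_bool treats
# empty/zero-ish strings as False and any other non-empty string as True.
#
#   >>> coerce_bool('False'), coerce_bool(''), coerce_bool([]), coerce_bool('yes')
#   (False, False, False, True)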
def find_keys(d: Dict[K, V], predicate: Callable[[V], bool]) -> List[K]:
"""Find keys where values match predicate."""
return [k for k, v in d.items() if predicate(v)]
def find_spec_keys(d: Dict[K, Any], types) -> List[K]:
"""Find keys where values match one or more types."""
return find_keys(d, lambda v: isinstance(v, types))
def filter_by_keys(d: Dict[K, V], predicate: Callable[[K], bool]) -> Dict[K, V]:
"""Filter to keys matching predicate."""
return {k: v for k, v in d.items() if predicate(k)}
def copy_and_update(d: Dict[K, Any], patch: Dict[K, Any]) -> Dict[K, Any]:
"""Make a copy of d and apply the patch to a subset of fields."""
ret = copy.copy(d)
ret.update(patch)
return ret
def remap_dict(d: Dict[K, V], keymap: Dict[K, K]) -> Dict[K, V]:
"""Return a (shallow) copy of d with some fields renamed.
Keys which are not in keymap are left alone.
Args:
d: dict to rename
keymap: map of old key -> new key
Returns:
new dict with fields renamed
"""
return {keymap.get(k, k): d[k] for k in d}
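# Illustrative usage (not part of the original module): keys listed in keymap
# are renamed, all other keys pass through unchanged.
#
#   >>> remap_dict({'old': 1, 'other': 2}, {'old': 'new'})
#   {'new': 1, 'other': 2}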
def rate_limit(iterable, qps: Union[int, float]):
"""Rate limit an iterator."""
for item in iterable:
yield item
time.sleep(1.0 / qps)
def batch_iterator(items: Iterable[T],
max_batch_size: int) -> Iterator[List[T]]:
"""Create batches from an input stream.
Use this to create batches, e.g. to feed to a model.
The output can be easily flattened again using itertools.chain.from_iterable.
Args:
items: stream of items
max_batch_size: maximum size of resulting batches
Yields:
batches of size <= max_batch_size
"""
minibatch = []
for item in items:
if len(minibatch) < max_batch_size:
minibatch.append(item)
if len(minibatch) >= max_batch_size:
yield minibatch
minibatch = []
if len(minibatch) > 0: # pylint: disable=g-explicit-length-test
yield minibatch
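# Illustrative usage (not part of the original module): batch a stream into
# lists of at most max_batch_size items, then flatten it back with
# itertools.chain.from_iterable as mentioned in the docstring above.
#
#   >>> list(batch_iterator(range(7), max_batch_size=3))
#   [[0, 1, 2], [3, 4, 5], [6]]
#   >>> list(itertools.chain.from_iterable(batch_iterator(range(7), 3)))
#   [0, 1, 2, 3, 4, 5, 6]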
def batch_inputs(input_records: Sequence[Dict[K, V]]) -> Dict[K, List[V]]:
"""Batch inputs from list-of-dicts to dict-of-lists."""
assert input_records, 'Must have non-empty batch!'
ret = {}
for k in input_records[0]:
ret[k] = [r[k] for r in input_records]
return ret
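# Illustrative usage (not part of the original module): a list of dicts becomes
# a dict of lists, keyed by the fields of the first record.
#
#   >>> batch_inputs([{'x': 1, 'y': 'a'}, {'x': 2, 'y': 'b'}])
#   {'x': [1, 2], 'y': ['a', 'b']}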
def _extract_batch_length(preds):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in preds.items():
this_length = (
len(value) if isinstance(value, (list, tuple)) else value.shape[0])
batch_length = batch_length or this_length
if this_length != batch_length:
raise ValueError('Batch length of predictions should be same. %s has '
'different batch length than others.' % key)
return batch_length
def unbatch_preds(preds):
"""Unbatch predictions, as in estimator.predict().
Args:
preds: Dict[str, np.ndarray], where all arrays have the same first
dimension.
Yields:
sequence of Dict[str, np.ndarray], with the same keys as preds.
"""
if not isinstance(preds, dict):
for pred in preds:
yield pred
else:
for i in range(_extract_batch_length(preds)):
yield {key: value[i] for key, value in preds.items()}
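# Illustrative usage (not part of the original module; assumes numpy imported
# as np): a batch of parallel arrays is split back into one dict per example.
#
#   preds = {'label': np.array([0, 1]), 'score': np.array([0.9, 0.2])}
#   per_example = list(unbatch_preds(preds))
#   # per_example[0] == {'label': 0, 'score': 0.9}
#   # per_example[1] == {'label': 1, 'score': 0.2}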
def find_all_combinations(l: List[Any], min_element_count: int,
max_element_count: int) -> List[List[Any]]:
"""Finds all possible ways how elements of a list can be combined.
E.g., all combinations of list [1, 2, 3] are
[[1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]].
Args:
l: a list of arbitrary elements.
min_element_count: the minimum number of elements that every combination
should contain.
max_element_count: the maximum number of elements that every combination
should contain.
Returns:
The list of all possible combinations given the constraints.
"""
result: List[List[Any]] = []
min_element_count = max(1, min_element_count)
max_element_count = min(max_element_count, len(l))
for element_count in range(min_element_count, max_element_count + 1):
result.extend(list(x) for x in itertools.combinations(l, element_count))
return result
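# Illustrative usage (not part of the original module): restricting the result
# to combinations of exactly two elements.
#
#   >>> find_all_combinations(['a', 'b', 'c'], min_element_count=2, max_element_count=2)
#   [['a', 'b'], ['a', 'c'], ['b', 'c']]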
class TaskQueue(queue.Queue):
"""A simple task queue for processing jobs in a thread pool."""
def __init__(self, num_workers=1):
# TODO(lit-dev): Could use QueueHandler and QueueListener for this.
queue.Queue.__init__(self)
self.num_workers = num_workers
self.start_workers()
def add_task(self, task, *args, **kwargs):
args = args or ()
kwargs = kwargs or {}
self.put((task, args, kwargs))
def start_workers(self):
for _ in range(self.num_workers):
t = threading.Thread(target=self.worker)
t.daemon = True
t.start()
def worker(self):
while True:
item, args, kwargs = self.get()
item(*args, **kwargs)
self.task_done()
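# Illustrative usage (not part of the original module): enqueue two jobs on a
# single daemon worker thread and block until both have been processed.
# `print` stands in for any callable task.
#
#   task_queue = TaskQueue(num_workers=1)
#   task_queue.add_task(print, 'first job')
#   task_queue.add_task(print, 'second job')
#   task_queue.join()  # returns once both tasks have called task_done()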
|
src/opendr/perception/face_recognition/algorithm/util/iterator.py
|
makistsantekidis/opendr
| 217 |
141206
|
<reponame>makistsantekidis/opendr
from opendr.engine.datasets import DatasetIterator
import torch
import numpy as np
class FaceRecognitionDataset(DatasetIterator):
def __init__(self, pairs):
self.data = np.array([pairs[:, 0], pairs[:, 1]])
self.labels = pairs[:, 2]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image1 = self.data[0][idx]
image2 = self.data[1][idx]
label = self.labels[idx]
sample = {'image1': image1, 'image2': image2, 'label': label}
return sample
def __len__(self):
return len(self.labels)
|
testproject/django_file_form_example/tests/utils/test_utils.py
|
kosior/django-file-form
| 133 |
141211
|
<reponame>kosior/django-file-form
import uuid
from datetime import datetime
from json import dumps
from pathlib import Path
from django.utils import timezone
from django.conf import settings
from django_file_form_example.models import Example, ExampleFile
def get_random_id():
return uuid.uuid4().hex
def encode_datetime(*args, **kwargs):
"""
Return a valid datetime.
- depends on timezone settings
"""
naive_datetime = datetime(*args, **kwargs)
if settings.USE_TZ:
return timezone.make_aware(naive_datetime, timezone.get_current_timezone())
else:
return naive_datetime # pragma: no cover
def remove_p(path):
if path.exists():
path.unlink()
def to_class_string(classes):
return "".join(f".{v}" for v in classes if v)
def read_file(file):
try:
return file.read()
finally:
file.close()
def write_json(path, data):
json = dumps(data)
Path(path).write_text(json)
def count_temp_uploads():
temp_uploads_path = Path(settings.MEDIA_ROOT).joinpath("temp_uploads")
return len(list(temp_uploads_path.iterdir()))
def remove_example_file(filename):
Path(settings.MEDIA_ROOT).joinpath("example").joinpath(filename).unlink()
def has_files(path: Path):
files = list(entry for entry in path.iterdir() if entry.is_file())
return len(files) != 0
def remove_empty_subdirectories(path: Path) -> object:
for entry in path.iterdir():
if entry.is_dir() and not has_files(entry):
remove_empty_subdirectories(entry)
entry.rmdir()
def remove_test_files():
for example in Example.objects.all():
example.input_file.delete()
for example_file in ExampleFile.objects.all():
example_file.input_file.delete()
temp_uploads = Path(settings.MEDIA_ROOT).joinpath("temp_uploads")
if temp_uploads.exists():
for temp_file in temp_uploads.iterdir():
temp_file.unlink()
example_uploads = Path(settings.MEDIA_ROOT).joinpath("example")
remove_empty_subdirectories(example_uploads)
|
app/settings.py
|
priyanshu-kumar02/personfinder
| 561 |
141212
|
<gh_stars>100-1000
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for Django settings."""
import os
import const
import site_settings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# If we actually did anything that used the secret key we'd need to set it to
# some constant value and find a way to secretly store it. However, we don't use
# it for anything. We need to set it to something to make Django happy though,
# and we set it to something random to be safe in case we unknowingly do
# something in the future that uses it (better to have a password reset token
# break because this changed or something like that than a security hole we
# don't know about).
SECRET_KEY = os.urandom(30)
# Check if we're running a local development server or in prod.
if os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
DEBUG = True
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = ['*']
SECURE_SSL_REDIRECT = False
else:
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
ALLOWED_HOSTS = site_settings.PROD_ALLOWED_HOSTS
SECURE_SSL_REDIRECT = True
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'csp.middleware.CSPMiddleware',
]
ROOT_URLCONF = 'urls'
# By default, if a URL can't be resolved and doesn't end in a slash, Django will
# issue a redirect to the same URL with a slash. We'd rather not issue
# unnecessary redirects, so we just put optional trailing slashes in the URL
# configuration.
APPEND_SLASH = False
# App Engine issues HTTP requests for tasks, so we don't force HTTPS for them.
SECURE_REDIRECT_EXEMPT = [r'^.*/tasks/.*']
if site_settings.OPTIONAL_PATH_PREFIX:
SECURE_REDIRECT_EXEMPT += [
r'^%s/.*/tasks/.*' % site_settings.OPTIONAL_PATH_PREFIX]
# Based on the Strict CSP example here:
# https://csp.withgoogle.com/docs/strict-csp.html
CSP_INCLUDE_NONCE_IN = ('script-src', 'style-src')
CSP_BASE_URI = "'none'"
CSP_OBJECT_SRC = "'none'"
CSP_SCRIPT_SRC = ("'unsafe-inline'", "'unsafe-eval'",
"'strict-dynamic' https: http:",)
CSP_STYLE_SRC = ("'unsafe-inline'", "'unsafe-eval'",
"'strict-dynamic' https: http:",)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['resources'],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Internationalization
LANGUAGE_CODE = const.DEFAULT_LANGUAGE_CODE
LANGUAGES_BIDI = ['ar', 'he', 'fa', 'iw', 'ur']
USE_I18N = True
USE_L10N = True
LOCALE_PATHS = ['locale']
TIME_ZONE = 'UTC'
USE_TZ = True
# Static files
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'resources/static/fixed'),
]
|
Chapter08/1imputation.py
|
karim7262/Python-Machine-Learning-By-Example
| 106 |
141229
|
import numpy as np
from sklearn.preprocessing import Imputer
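# Note: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22; on newer
# versions the equivalent is sklearn.impute.SimpleImputer with strategy='mean'
# or strategy='median'.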
# Represent the unknown value by np.nan in numpy
data_origin = [[30, 100],
[20, 50],
[35, np.nan],
[25, 80],
[30, 70],
[40, 60]]
# Imputation with the mean value
imp_mean = Imputer(missing_values='NaN', strategy='mean')
imp_mean.fit(data_origin)
data_mean_imp = imp_mean.transform(data_origin)
print(data_mean_imp)
# Imputation with the median value
imp_median = Imputer(missing_values='NaN', strategy='median')
imp_median.fit(data_origin)
data_median_imp = imp_median.transform(data_origin)
print(data_median_imp)
# New samples
new = [[20, np.nan],
[30, np.nan],
[np.nan, 70],
[np.nan, np.nan]]
new_mean_imp = imp_mean.transform(new)
print(new_mean_imp)
# Effects of discarding missing values and imputation
from sklearn import datasets
dataset = datasets.load_diabetes()
X_full, y = dataset.data, dataset.target
# Simulate a corrupted data set by adding 25% missing values
m, n = X_full.shape
m_missing = int(m * 0.25)
print(m, m_missing)
# Randomly select m_missing samples
np.random.seed(42)
missing_samples = np.array([True] * m_missing + [False] * (m - m_missing))
np.random.shuffle(missing_samples)
# For each missing sample, randomly select 1 out of n features
missing_features = np.random.randint(low=0, high=n, size=m_missing)
# Represent missing values by nan
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = np.nan
# Discard samples containing missing values
X_rm_missing = X_missing[~missing_samples, :]
y_rm_missing = y[~missing_samples]
# Estimate R^2 on the data set with missing samples removed
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
score_rm_missing = cross_val_score(regressor, X_rm_missing, y_rm_missing).mean()
print('Score with the data set with missing samples removed: {0:.2f}'.format(score_rm_missing))
# Imputation with mean value
imp_mean = Imputer(missing_values='NaN', strategy='mean')
X_mean_imp = imp_mean.fit_transform(X_missing)
# Estimate R^2 on the data set with missing samples removed
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
score_mean_imp = cross_val_score(regressor, X_mean_imp, y).mean()
print('Score with the data set with missing values replaced by mean: {0:.2f}'.format(score_mean_imp))
# Estimate R^2 on the full data set
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=500)
score_full = cross_val_score(regressor, X_full, y).mean()
print('Score with the full data set: {0:.2f}'.format(score_full))
# # Imputation with median value
# imp_mean = Imputer(missing_values='NaN', strategy='median')
# X_mean_imp = imp_mean.fit_transform(X_missing)
# # Estimate R^2 on the data set with missing samples removed
# regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
# score_mean_imp = cross_val_score(regressor, X_mean_imp, y).mean()
# print('Score with the data set with missing values replaced by mean: {0:.2f}'.format(score_mean_imp))
|
tests/autobahn_test.py
|
andreasbossard/deutschland
| 445 |
141231
|
from pprint import pprint
from deutschland import autobahn
from deutschland.autobahn.api import default_api
autobahn_api_instance = default_api.DefaultApi()
try:
# Auflistung aller Autobahnen
api_response = autobahn_api_instance.list_autobahnen()
pprint(api_response)
# Details zu einer Ladestation
station_id = "RUxFQ1RSSUNfQ0hBUkdJTkdfU1RBVElPTl9fMTczMzM=" # str |
api_response = autobahn_api_instance.get_charging_station(station_id)
pprint(api_response)
except autobahn.ApiException as e:
print("Exception when calling DefaultApi->get_charging_station: %s\n" % e)
|
common/utils/manopth/test/test_demo.py
|
Alan-delete/I2L-MeshNet_RELEASE
| 544 |
141233
|
import torch
from manopth.demo import generate_random_hand
def test_generate_random_hand():
batch_size = 3
hand_info = generate_random_hand(batch_size=batch_size, ncomps=6)
verts = hand_info['verts']
joints = hand_info['joints']
assert verts.shape == (batch_size, 778, 3)
assert joints.shape == (batch_size, 21, 3)
|
mode/examples/Contributed Libraries in Python/OpenCV/FindEdges/FindEdges.pyde
|
timgates42/processing.py
| 1,224 |
141247
|
add_library('opencv_processing')
src = loadImage("test.jpg")
size(src.width, src.height, P2D)
opencv = OpenCV(this, src)
opencv.findCannyEdges(20, 75)
canny = opencv.getSnapshot()
opencv.loadImage(src)
opencv.findScharrEdges(OpenCV.HORIZONTAL)
scharr = opencv.getSnapshot()
opencv.loadImage(src)
opencv.findSobelEdges(1, 0)
sobel = opencv.getSnapshot()
with pushMatrix():
scale(0.5)
image(src, 0, 0)
image(canny, src.width, 0)
image(scharr, 0, src.height)
image(sobel, src.width, src.height)
text("Source", 10, 25)
text("Canny", src.width / 2 + 10, 25)
text("Scharr", 10, src.height / 2 + 25)
text("Sobel", src.width / 2 + 10, src.height / 2 + 25)
|
dragonfly/gp/mf_gp.py
|
hase1128/dragonfly
| 675 |
141248
|
<reponame>hase1128/dragonfly
"""
Implements the kernel, GP and fitter for multi-fidelity GPs.
--<EMAIL>
"""
from __future__ import division
# pylint: disable=invalid-name
# pylint: disable=abstract-class-not-used
# Local imports
from . import kernel as gp_kernel
from .gp_core import GP, GPFitter, mandatory_gp_args
from ..utils.option_handler import load_options
from ..utils.reporters import get_reporter
from ..utils.ancillary_utils import get_list_of_floats_as_str
def get_ZX_from_ZZ_XX(ZZ, XX):
""" Get a combined representation for the fidelity and domian data. """
if hasattr(ZZ, '__iter__') and len(ZZ) == len(XX):
return [(z, x) for (z, x) in zip(ZZ, XX)]
else:
return (ZZ, XX)
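# Illustrative usage (not part of the original module): parallel fidelity and
# domain lists are zipped into (z, x) pairs; anything else is returned as a
# single (ZZ, XX) tuple.
#
#   >>> get_ZX_from_ZZ_XX([[0.5], [1.0]], [[1, 2], [3, 4]])
#   [([0.5], [1, 2]), ([1.0], [3, 4])]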
class MFGP(GP):
""" A GP to be used in multi-fidelity settings. """
def __init__(self, ZZ, XX, YY, mf_kernel,
mean_func, noise_var, *args, **kwargs):
""" Constructor. ZZ, XX, YY are the fidelity points, domain points and labels
respectively.
mf_kernel is either a combined kernel or a namespace with the following fields:
{scale, fidel_kernel, domain_kernel}.
"""
self.ZZ = list(ZZ)
self.XX = list(XX)
self.YY = list(YY)
if not isinstance(mf_kernel, gp_kernel.Kernel):
kernel = gp_kernel.CartesianProductKernel(mf_kernel.scale,
[mf_kernel.fidel_kernel, mf_kernel.domain_kernel])
self.fidel_kernel = mf_kernel.fidel_kernel
self.domain_kernel = mf_kernel.domain_kernel
else:
kernel = mf_kernel
ZX = self.get_ZX_from_ZZ_XX(ZZ, XX) # The 'X' data
# Call super constructor
super(MFGP, self).__init__(ZX, YY, kernel, mean_func, noise_var, *args, **kwargs)
@classmethod
def get_ZX_from_ZZ_XX(cls, ZZ, XX):
""" Get a combined representation for the fidelity and domian data.
Can be overridden by a child class if there is a more efficient representation."""
return get_ZX_from_ZZ_XX(ZZ, XX)
def eval_at_fidel(self, ZZ_test, XX_test, *args, **kwargs):
""" Evaluates the GP at [ZZ_test, XX_test]. Read eval in gp_core.GP for more details.
"""
ZX_test = self.get_ZX_from_ZZ_XX(ZZ_test, XX_test)
return self.eval(ZX_test, *args, **kwargs)
def eval_at_fidel_with_hallucinated_observations(self, ZZ_test, XX_test,
ZZ_halluc, XX_halluc, *args, **kwargs):
""" Evaluates with hallucinated observations. """
ZX_test = self.get_ZX_from_ZZ_XX(ZZ_test, XX_test)
ZX_halluc = self.get_ZX_from_ZZ_XX(ZZ_halluc, XX_halluc)
return self.eval_with_hallucinated_observations(ZX_test, ZX_halluc, *args, **kwargs)
def set_mf_data(self, ZZ, XX, YY, build_posterior=True):
""" Sets the MF data for the GP. """
self.ZZ = list(ZZ)
self.XX = list(XX)
self.YY = list(YY)
ZX = self.get_ZX_from_ZZ_XX(ZZ, XX) # The 'X' data
super(MFGP, self).set_data(ZX, YY, build_posterior)
def add_mf_data_multiple(self, ZZ_new, XX_new, YY_new, *args, **kwargs):
""" Adds new data to the multi-fidelity GP. """
ZX_new = self.get_ZX_from_ZZ_XX(ZZ_new, XX_new)
self.ZZ.extend(ZZ_new)
self.XX.extend(XX_new)
self.add_data_multiple(ZX_new, YY_new, *args, **kwargs)
def add_mf_data_single(self, zz_new, xx_new, yy_new, *args, **kwargs):
""" Adds a single new data to the multi-fidelity GP. """
self.add_mf_data_multiple([zz_new], [xx_new], [yy_new], *args, **kwargs)
def draw_mf_samples(self, num_samples, ZZ_test=None, XX_test=None, *args, **kwargs):
""" Draws samples from a multi-fidelity GP. """
ZX_test = None if ZZ_test is None else self.get_ZX_from_ZZ_XX(ZZ_test, XX_test)
return self.draw_samples(num_samples, ZX_test, *args, **kwargs)
def get_fidel_kernel(self):
""" Return the fidel_space kernel. """
return self.fidel_kernel
def get_domain_kernel(self):
""" Return the domain kernel. """
return self.domain_kernel
def _child_str(self):
""" Returns a string representation of the MF-GP. """
if hasattr(self, 'fidel_kernel') and hasattr(self, 'domain_kernel'):
fidel_ke_str = self._get_kernel_str(self.fidel_kernel)
domain_ke_str = self._get_kernel_str(self.domain_kernel)
kernel_str = 'fid:: %s, dom:: %s'%(fidel_ke_str, domain_ke_str)
else:
kernel_str = str(self.kernel)
ret = 'scale: %0.3f, %s'%(self.kernel.hyperparams['scale'], kernel_str)
return ret
@classmethod
def _get_kernel_str(cls, kern):
""" Gets a string format of the kernel depending on whether it is SE/Poly. """
if isinstance(kern, gp_kernel.ExpDecayKernel):
ret = 'expd: offs=%0.3f, pow=%s'%(kern.hyperparams['offset'],
get_list_of_floats_as_str(kern.hyperparams['powers']))
elif isinstance(kern, gp_kernel.SEKernel) or isinstance(kern, gp_kernel.MaternKernel):
hp_name = 'dim_bandwidths'
kern_name = 'se' if isinstance(kern, gp_kernel.SEKernel) else \
'matern(%0.1f)'%(kern.hyperparams['nu'])
if kern.dim > 4:
ret = '%0.4f(avg)'%(kern.hyperparams[hp_name].mean())
else:
ret = get_list_of_floats_as_str(kern.hyperparams[hp_name])
ret = kern_name + ': ' + ret
elif isinstance(kern, gp_kernel.PolyKernel):
ret = 'poly: %s'%(get_list_of_floats_as_str(kern.hyperparams['dim_scalings']))
else: # Return an empty string.
ret = str(kern)
return ret
class MFGPFitter(GPFitter):
""" A GP Fitter for Multi-fidelity GPs. This is mostly a wrapper for MFGPs that
want to use GPFitter. All the heavy lifting is happening in GPFitter. """
# pylint: disable=abstract-method
def __init__(self, ZZ, XX, YY, options=None, reporter=None):
""" Constructor. """
reporter = get_reporter(reporter)
options = load_options(mandatory_gp_args, partial_options=options)
self.ZZ = ZZ
self.XX = XX
self.YY = YY
self.num_tr_data = len(self.YY)
ZX = get_ZX_from_ZZ_XX(ZZ, XX)
super(MFGPFitter, self).__init__(ZX, YY, options, reporter)
|
html/browsers/browsing-the-web/overlapping-navigations-and-traversals/tentative/resources/slow.py
|
meyerweb/wpt
| 14,668 |
141261
|
# Like /common/slow.py except with text/html content-type so that it won't
# trigger strange parts of the <iframe> navigate algorithm.
import time
def main(request, response):
time.sleep(2)
return 200, [["Content-Type", "text/html"]], b''
|
bmemcached/__init__.py
|
alexmv/python-binary-memcached
| 103 |
141265
|
__all__ = ('Client', 'ReplicatingClient', 'DistributedClient', )
from bmemcached.client import Client, ReplicatingClient, DistributedClient
|
SPOJ/Random/ONEZERO/test.py
|
VastoLorde95/Competitive-Programming
| 170 |
141279
|
mx = 0
for i in xrange(1, 100):
j = 1
while True:
num = i * j
s = str(num)
if s.count('1') + s.count('0') == len(s):
mx = max(mx, j)
break
j += 1
print mx
|
bscan/config.py
|
netsec/bscan
| 229 |
141302
|
<gh_stars>100-1000
"""Configuration initialization and handling."""
import os
import re
import shutil
import sys
import toml
from argparse import (
Namespace)
from collections import (
namedtuple)
from pkg_resources import (
resource_string)
from typing import (
Optional)
from bscan.errors import (
BscanConfigError)
from bscan.io_console import (
print_w_d2)
from bscan.io_files import (
dir_exists,
file_exists)
from bscan.runtime import (
db,
lock)
DEFAULT_WORDLIST_SEARCH_DIRS = [
'/usr/share/wordlists/',
'/usr/share/seclists/Passwords/']
PortScanConfig = namedtuple(
'PortScanConfig',
['name', 'pattern', 'scan'])
"""Encapsulation of data parsed from `port-scans.toml` file."""
def good_py_version() -> bool:
"""Verify that this program is being run with the expected version."""
return sys.version_info.major >= 3 and sys.version_info.minor >= 6
def py_version_str() -> str:
"""Get the running Python version as a string."""
return str(sys.version_info.major) + '.' + str(sys.version_info.minor)
def load_default_config_file(filename: str) -> str:
"""Packaged-friendly method to load contents of a default config file."""
try:
pyinst_basedir = getattr(sys, '_MEIPASS', None)
if pyinst_basedir is not None:
# load configuration from PyInstaller bundle
filepath = os.path.join(pyinst_basedir, 'configuration', filename)
with open(filepath, 'r') as f:
raw_contents = f.read()
else:
# load configuration from either Python wheel or the filesystem
raw_contents = resource_string(
__name__, 'configuration/' + filename).decode('utf-8')
except FileNotFoundError:
raise BscanConfigError(
'Unable to find default configuration file `' + filename + '`')
return raw_contents
def load_config_file(filename: str, base_dir: Optional[str]=None) -> str:
"""Load config file from specified base_dir, falling back on defaults."""
if base_dir is None:
return load_default_config_file(filename)
elif not dir_exists(base_dir):
print_w_d2('Specified `--output-dir` ', base_dir, ' does not exist, '
'falling back to default configuration file for ', filename)
return load_default_config_file(filename)
path = os.path.join(base_dir, filename)
if file_exists(path):
with open(path, 'r') as f:
return f.read()
else:
print_w_d2('File ', filename, ' not found in specified `--output-dir`'
', falling back to default configuration file for ',
filename)
return load_default_config_file(filename)
async def init_config(ns: Namespace) -> None:
"""Init configuration from default files and command-line arguments."""
async with lock:
# track targets being actively scanned
db['active-targets'] = set()
# --brute-pass-list
if ns.brute_pass_list is None:
db['brute-pass-list'] = '/usr/share/wordlists/fasttrack.txt'
else:
db['brute-pass-list'] = ns.brute_pass_list
if not ns.no_file_check and not file_exists(db['brute-pass-list']):
raise BscanConfigError(
'`--brute-pass-list` file ' + db['brute-pass-list'] +
' does not exist')
# --brute-user-list
if ns.brute_user_list is None:
db['brute-user-list'] = (
'/usr/share/wordlists/metasploit/namelist.txt')
else:
db['brute-user-list'] = ns.brute_user_list
if not ns.no_file_check and not file_exists(db['brute-user-list']):
raise BscanConfigError(
'`--brute-user-list` file ' + db['brute-user-list'] +
' does not exist')
# --cmd-print-width
try:
cmd_print_width = (80 if ns.cmd_print_width is None
else int(ns.cmd_print_width))
if cmd_print_width < 5:
raise ValueError
except ValueError:
raise BscanConfigError(
'Invalid `--cmd-print-width` value specified; must be an '
'integer greater than or equal to 5')
db['cmd-print-width'] = cmd_print_width
# --output-dir
if ns.output_dir is None:
db['output-dir'] = os.getcwd()
else:
db['output-dir'] = ns.output_dir
if not dir_exists(db['output-dir']):
raise BscanConfigError(
'`--output-dir` directory ' + db['output-dir'] +
' does not exist')
# --patterns; also loads from `configuration/patterns.txt`
patterns = load_config_file(
'patterns.txt',
ns.config_dir).splitlines()
if ns.patterns is not None:
if not ns.patterns:
raise BscanConfigError(
'`--patterns` requires at least one regex pattern')
else:
patterns.extend(ns.patterns)
db['patterns'] = re.compile('|'.join(patterns))
# --no-program-check
if not ns.no_program_check:
not_found_progs = []
progs = load_config_file(
'required-programs.txt',
ns.config_dir).splitlines()
for prog in progs:
if shutil.which(prog) is None:
not_found_progs.append(prog)
if not_found_progs:
raise BscanConfigError(
'required programs ' + ', '.join(not_found_progs) +
' could not be found on this system')
# --no-service-scans
db['no-service-scans'] = ns.no_service_scans
# load service information from `configuration/service-scans.toml`
db['services'] = toml.loads(
load_config_file('service-scans.toml', ns.config_dir))
# load quick scan method configuration
# derived from `--qs-method` + `configuration/port-scans.toml`
port_scan_config = toml.loads(
load_config_file('port-scans.toml', ns.config_dir))
qs_config = port_scan_config['quick']
qs_method_name = (ns.qs_method if ns.qs_method is not None else
qs_config['default'])
if qs_method_name not in qs_config or qs_method_name == 'default':
raise BscanConfigError(
'Invalid `--qs-method` specified: ' + str(qs_method_name))
qs_attrs = qs_config[qs_method_name]
db['quick-scan'] = PortScanConfig(
qs_method_name,
re.compile(qs_attrs['pattern']),
qs_attrs['scan'])
# load thorough scan method configuration
# derived from `--ts-method` + `configuration/port-scans.toml`
ts_config = port_scan_config['thorough']
ts_method_name = (ns.ts_method if ns.ts_method is not None else
ts_config['default'])
if ts_method_name not in ts_config or ts_method_name == 'default':
raise BscanConfigError(
'Invalid `--ts-method` specified: ' + str(ts_method_name))
ts_attrs = ts_config[ts_method_name]
db['thorough-scan'] = PortScanConfig(
ts_method_name,
re.compile(ts_attrs['pattern']),
ts_attrs['scan'])
# load udp scan method configuration
# derived from `--udp-method` + `configuration/port-scans.toml`
udp_config = port_scan_config['udp']
udp_method_name = (ns.udp_method if ns.udp_method is not None else
udp_config['default'])
if udp_method_name not in udp_config or udp_method_name == 'default':
raise BscanConfigError(
'Invalid `--udp-method` specified: ' + str(udp_method_name))
udp_attrs = udp_config[udp_method_name]
db['udp-scan'] = PortScanConfig(
udp_method_name,
re.compile(udp_attrs['pattern']),
udp_attrs['scan'])
# --status-interval
try:
db['status-interval'] = (30 if ns.status_interval is None
else int(ns.status_interval))
except ValueError:
raise BscanConfigError(
'Invalid `--status-interval` integer specified: ' +
str(ns.status_interval))
# runtime tracking of active subprocesses
db['subprocesses'] = dict()
# --web-word-list
if ns.web_word_list is None:
db['web-word-list'] = '/usr/share/dirb/wordlists/big.txt'
else:
db['web-word-list'] = ns.web_word_list
if not ns.no_file_check and not file_exists(db['web-word-list']):
raise BscanConfigError(
'`--web-word-list` file ' + db['web-word-list'] +
' does not exist')
# --quick-only
db['quick-only'] = ns.quick_only
# --hard
db['hard'] = ns.hard
# --ping-sweep
if ns.ping_sweep:
raise BscanConfigError(
'`--ping-sweep` option not yet implemented')
db['ping-sweep'] = ns.ping_sweep
# --udp
db['udp'] = ns.udp
# --verbose-status
db['verbose-status'] = ns.verbose_status
|
SoftLayer/CLI/order/lookup.py
|
dvzrv/softlayer-python
| 126 |
141324
|
<gh_stars>100-1000
"""Provides some details related to the order."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI.account.invoice_detail import get_invoice_table
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers import ordering
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@click.option('--details', is_flag=True, default=False, show_default=True,
help="Shows a very detailed list of charges")
@environment.pass_env
def cli(env, identifier, details):
"""Provides some details related to order owner, date order, cost information, initial invoice."""
manager = ordering.OrderingManager(env.client)
order = manager.get_order_detail(identifier)
order_table = get_order_table(order)
invoice = order.get('initialInvoice', {})
top_items = invoice.get('invoiceTopLevelItems', [])
invoice_id = invoice.get('id')
invoice_table = get_invoice_table(invoice_id, top_items, details)
order_table.add_row(['Initial Invoice', invoice_table])
env.fout(order_table)
def get_order_table(order):
"""Formats a table for billing order"""
title = "Order {id}".format(id=order.get('id'))
date_format = '%Y-%m-%d'
table = formatting.Table(["Key", "Value"], title=title)
table.align = 'l'
ordered_by = "IBM"
user = order.get('userRecord', None)
if user:
ordered_by = "{} ({})".format(user.get('displayName'), utils.lookup(user, 'userStatus', 'name'))
table.add_row(['Ordered By', ordered_by])
table.add_row(['Create Date', utils.clean_time(order.get('createDate'), date_format, date_format)])
table.add_row(['Modify Date', utils.clean_time(order.get('modifyDate'), date_format, date_format)])
table.add_row(['Order Approval Date', utils.clean_time(order.get('orderApprovalDate'), date_format, date_format)])
table.add_row(['Status', order.get('status')])
table.add_row(['Order Total Amount', "{price:.2f}".format(price=float(order.get('orderTotalAmount', '0')))])
table.add_row(['Invoice Total Amount', "{price:.2f}".
format(price=float(order.get('initialInvoice', {}).get('invoiceTotalAmount', '0')))])
items = order.get('items', [])
item_table = formatting.Table(["Item Description"])
item_table.align['description'] = 'l'
for item in items:
item_table.add_row([item.get('description')])
table.add_row(['Items', item_table])
return table
|
openbb_terminal/stocks/options/calculator_model.py
|
tehcoderer/GamestonkTerminal
| 255 |
141338
|
<gh_stars>100-1000
"""Calculator Model"""
__docformat__ = "numpy"
import logging
from typing import Dict, Tuple
import numpy as np
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def pnl_calculator(
strike: float, premium: float, put: bool, sell: bool, **kwargs: Dict[str, int]
) -> Tuple[np.ndarray, np.ndarray, float]:
"""Calculate profit/loss for different option variables
Parameters
----------
strike: float
Strike price
premium: float
Premium
put: bool
Is this a put option
sell: bool
Are you selling the option
kwargs
Optional x_min and x_max values bounding the price range at expiry.
Returns
-------
price_at_expiry : np.ndarray
Array of prices
pnl: np.ndarray
Array of calculated profit/loss
break_even: float
Breakeven point
"""
if "x_min" in kwargs and "x_max" in kwargs:
price_at_expiry = np.linspace(kwargs["x_min"], kwargs["x_max"], 301) # type: ignore
else:
price_at_expiry = np.linspace(strike / 2, 1.5 * strike, 301)
sell_factor = [1, -1][sell]
if put:
break_even = strike - sell_factor * premium
pnl = strike - premium - price_at_expiry
pnl = sell_factor * 100 * np.where(price_at_expiry < strike, pnl, -premium)
else:
break_even = strike + sell_factor * premium
pnl = price_at_expiry - strike - premium
pnl = sell_factor * 100 * np.where(price_at_expiry > strike, pnl, -premium)
return price_at_expiry, pnl, break_even
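# Illustrative usage (not part of the original module): profit/loss profile for
# buying a call with a 100 strike at a 5 premium.
#
#   prices, pnl, break_even = pnl_calculator(strike=100, premium=5, put=False, sell=False)
#   # break_even == 105; pnl is -500 (the premium paid per contract) for prices
#   # at or below the strike and rises linearly above it, crossing zero at the
#   # break-even price.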
|
scvelo/plotting/simulation.py
|
WeilerP/scvelo
| 272 |
141358
|
<reponame>WeilerP/scvelo
import numpy as np
import matplotlib.pyplot as pl
from matplotlib import rcParams
from scvelo.core import SplicingDynamics
from scvelo.tools.dynamical_model_utils import get_vars, tau_inv, unspliced, vectorize
from .utils import make_dense
def get_dynamics(adata, key="fit", extrapolate=False, sorted=False, t=None):
alpha, beta, gamma, scaling, t_ = get_vars(adata, key=key)
if extrapolate:
u0_ = unspliced(t_, 0, alpha, beta)
tmax = t_ + tau_inv(u0_ * 1e-4, u0=u0_, alpha=0, beta=beta)
t = np.concatenate(
[np.linspace(0, t_, num=500), t_ + np.linspace(0, tmax, num=500)]
)
elif t is None or t is True:
t = adata.obs[f"{key}_t"].values if key == "true" else adata.layers[f"{key}_t"]
tau, alpha, u0, s0 = vectorize(np.sort(t) if sorted else t, t_, alpha, beta, gamma)
ut, st = SplicingDynamics(
alpha=alpha, beta=beta, gamma=gamma, initial_state=[u0, s0]
).get_solution(tau)
return alpha, ut, st
def compute_dynamics(
adata, basis, key="true", extrapolate=None, sort=True, t_=None, t=None
):
idx = adata.var_names.get_loc(basis) if isinstance(basis, str) else basis
key = "fit" if f"{key}_gamma" not in adata.var_keys() else key
alpha, beta, gamma, scaling, t_ = get_vars(adata[:, basis], key=key)
if "fit_u0" in adata.var.keys():
u0_offset, s0_offset = adata.var["fit_u0"][idx], adata.var["fit_s0"][idx]
else:
u0_offset, s0_offset = 0, 0
if t is None or isinstance(t, bool) or len(t) < adata.n_obs:
t = (
adata.obs[f"{key}_t"].values
if key == "true"
else adata.layers[f"{key}_t"][:, idx]
)
if extrapolate:
u0_ = unspliced(t_, 0, alpha, beta)
tmax = np.max(t) if True else tau_inv(u0_ * 1e-4, u0=u0_, alpha=0, beta=beta)
t = np.concatenate(
[np.linspace(0, t_, num=500), np.linspace(t_, tmax, num=500)]
)
tau, alpha, u0, s0 = vectorize(np.sort(t) if sort else t, t_, alpha, beta, gamma)
ut, st = SplicingDynamics(
alpha=alpha, beta=beta, gamma=gamma, initial_state=[u0, s0]
).get_solution(tau, stacked=False)
ut, st = ut * scaling + u0_offset, st + s0_offset
return alpha, ut, st
def show_full_dynamics(
adata,
basis,
key="true",
use_raw=False,
linewidth=1,
linecolor=None,
show_assignments=None,
ax=None,
):
if ax is None:
ax = pl.gca()
color = linecolor if linecolor else "grey" if key == "true" else "purple"
linewidth = 0.5 * linewidth if key == "true" else linewidth
label = "learned dynamics" if key == "fit" else "true dynamics"
line = None
if key != "true":
_, ut, st = compute_dynamics(
adata, basis, key, extrapolate=False, sort=False, t=show_assignments
)
if not isinstance(show_assignments, str) or show_assignments != "only":
ax.scatter(st, ut, color=color, s=1)
if show_assignments is not None and show_assignments is not False:
skey, ukey = (
("spliced", "unspliced")
if use_raw or "Ms" not in adata.layers.keys()
else ("Ms", "Mu")
)
s, u = (
make_dense(adata[:, basis].layers[skey]).flatten(),
make_dense(adata[:, basis].layers[ukey]).flatten(),
)
ax.plot(
np.array([s, st]),
np.array([u, ut]),
color="grey",
linewidth=0.1 * linewidth,
)
if not isinstance(show_assignments, str) or show_assignments != "only":
_, ut, st = compute_dynamics(
adata, basis, key, extrapolate=True, t=show_assignments
)
(line,) = ax.plot(st, ut, color=color, linewidth=linewidth, label=label)
idx = adata.var_names.get_loc(basis)
beta, gamma = adata.var[f"{key}_beta"][idx], adata.var[f"{key}_gamma"][idx]
xnew = np.linspace(np.min(st), np.max(st))
ynew = gamma / beta * (xnew - np.min(xnew)) + np.min(ut)
ax.plot(xnew, ynew, color=color, linestyle="--", linewidth=linewidth)
return line, label
def simulation(
adata,
var_names="all",
legend_loc="upper right",
legend_fontsize=20,
linewidth=None,
dpi=None,
xkey="true_t",
ykey=None,
colors=None,
**kwargs,
):
from scvelo.tools.utils import make_dense
from .scatter import scatter
if ykey is None:
ykey = ["unspliced", "spliced", "alpha"]
if colors is None:
colors = ["darkblue", "darkgreen", "grey"]
var_names = (
adata.var_names
if isinstance(var_names, str) and var_names == "all"
else [name for name in var_names if name in adata.var_names]
)
figsize = rcParams["figure.figsize"]
ncols = len(var_names)
for i, gs in enumerate(
pl.GridSpec(
1, ncols, pl.figure(None, (figsize[0] * ncols, figsize[1]), dpi=dpi)
)
):
idx = adata.var_names.get_loc(var_names[i])
alpha, ut, st = compute_dynamics(adata, idx)
t = (
adata.obs[xkey]
if xkey in adata.obs.keys()
else make_dense(adata.layers["fit_t"][:, idx])
)
idx_sorted = np.argsort(t)
t = t[idx_sorted]
ax = pl.subplot(gs)
_kwargs = {"alpha": 0.3, "title": "", "xlabel": "time", "ylabel": "counts"}
_kwargs.update(kwargs)
linewidth = 1 if linewidth is None else linewidth
ykey = [ykey] if isinstance(ykey, str) else ykey
for j, key in enumerate(ykey):
if key in adata.layers:
y = make_dense(adata.layers[key][:, idx])[idx_sorted]
ax = scatter(x=t, y=y, color=colors[j], ax=ax, show=False, **_kwargs)
if key == "unspliced":
ax.plot(t, ut, label="unspliced", color=colors[j], linewidth=linewidth)
elif key == "spliced":
ax.plot(t, st, label="spliced", color=colors[j], linewidth=linewidth)
elif key == "alpha":
largs = dict(linewidth=linewidth, linestyle="--")
ax.plot(t, alpha, label="alpha", color=colors[j], **largs)
pl.xlim(0)
pl.ylim(0)
if legend_loc != "none" and i == ncols - 1:
pl.legend(loc=legend_loc, fontsize=legend_fontsize)
|
keras/caffe/models/segmentation/test_segmentation.py
|
lvapeab/keras
| 259 |
141377
|
"""
author: <NAME> (https://github.com/akshaychawla)
TEST:
Test convert.py's ability to handle Deconvolution and Crop layers
by converting voc-fcn8s .prototxt and .caffemodel present in the caffe/models/segmentation folder
"""
# import os
# import inspect
# import numpy as np
# import keras.caffe.convert as convert
# from scipy import misc
# import matplotlib.pyplot as plt
# from subprocess import call
# check whether files are present in folder
"""
path = os.path.dirname(inspect.getfile(inspect.currentframe()))
assert os.path.exists(path + "/deploy.prototxt"), "Err. Couldn't find the debug.prototxt file"
assert os.path.exists(path + "/horse.png"), "Err. Couldn't find the horse.png image file"
if not os.path.exists(path + "/fcn8s-heavy-pascal.caffemodel"):
call(["wget http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel -O "
+ "./" + path + "/fcn8s-heavy-pascal.caffemodel"],
shell=True)
assert os.path.exists(path + "/fcn8s-heavy-pascal.caffemodel"), "Err. Cannot find .caffemodel file. \
please download file using command : wget http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel "
model = convert.caffe_to_keras(path + "/deploy.prototxt", path + "/fcn8s-heavy-pascal.caffemodel", debug=1)
print ("Yay!")
# 1. load image
img = misc.imread(path + "/horse.png")
# modify it
img = np.rollaxis(img, 2)
img = np.expand_dims(img, 0)
# 2. run forward pass
op = model.predict(img)
# 3. reshape output
op = op[0]
op = op.reshape((500, 500, 21))
op_arg = np.argmax(op, axis=2)
# 4. plot output
plt.imshow(op_arg)
plt.show()
print ("..done")
"""
|
.github/workflows/update_init_el.py
|
jstranik/nix-doom-emacs
| 220 |
141406
|
<filename>.github/workflows/update_init_el.py
#!/usr/bin/env python3
import sys
from difflib import unified_diff
from os import environ as env
from typing import cast
from github import Github
from github.ContentFile import ContentFile
from github.GithubException import GithubException
from github.Repository import Repository
API_TOKEN = env["GITHUB_API_TOKEN"]
REPOSITORY = env["GITHUB_REPOSITORY"]
BASE_BRANCH = env.get("GITHUB_BASE_BRANCH", "master")
DRY_RUN = bool(env.get("GITHUB_DRY_RUN", False))
INIT_EL = "test/doom.d/init.el"
UPSTREAM_INIT_EL = "init.example.el"
DOOM_UPSTREAM = "hlissner/doom-emacs"
UPSTREAM_BRANCH = "develop"
def create_pr(
repo: Repository,
pr_branch_name: str,
head: str,
file: ContentFile,
updated_content: str,
pr_title: str,
pr_body: str,
):
try:
repo.get_branch(pr_branch_name)
print(f"Branch '{pr_branch_name}' already exist. Skipping update.")
return
except GithubException as ex:
if ex.status != 404:
raise
pr_branch = repo.create_git_ref(pr_branch_name, head)
repo.update_file(
file.path,
f"{pr_title}\n\n{pr_body}",
updated_content,
file.sha,
branch=pr_branch_name,
)
repo.create_pull(title=pr_title, body=pr_body, head=pr_branch.ref, base=BASE_BRANCH)
def main():
if API_TOKEN:
github = Github(API_TOKEN)
else:
print("GITHUB_API_TOKEN is required")
sys.exit(1)
repo = github.get_repo(REPOSITORY)
head = repo.get_branch(BASE_BRANCH).commit.sha
init_el = cast(ContentFile, repo.get_contents(INIT_EL, ref=BASE_BRANCH))
doom_repo = github.get_repo(DOOM_UPSTREAM)
upstream_init_el = cast(
ContentFile, doom_repo.get_contents(UPSTREAM_INIT_EL, ref=UPSTREAM_BRANCH)
)
diff = "".join(
unified_diff(
init_el.decoded_content.decode().splitlines(keepends=True),
upstream_init_el.decoded_content.decode().splitlines(keepends=True),
)
)
if not diff:
print(f"{INIT_EL} is up-to date")
return
print(f"{INIT_EL} updated.")
print(diff)
upstream_rev = doom_repo.get_branch(UPSTREAM_BRANCH).commit.sha
title = f"{INIT_EL}: Updating from {DOOM_UPSTREAM} - {upstream_rev[:8]}"
body = f"""\
### Changes for {INIT_EL}
```diff
{diff}
```
"""
print(f"[{INIT_EL}] - Creating PR\nTitle: {title}\nBody:\n{body}")
if DRY_RUN:
print(f"DRY-RUN: NOT creating PR...")
return
pr_branch_name = f"refs/heads/update/init.el-{upstream_rev}"
create_pr(
repo,
pr_branch_name,
head,
init_el,
upstream_init_el.decoded_content.decode(),
title,
body,
)
if __name__ == "__main__":
main()
|
tests/notebooks/mirror/ipynb_to_hydrogen/nteract_with_parameter.py
|
st--/jupytext
| 5,378 |
141416
|
# ---
# jupyter:
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% outputHidden=false inputHidden=false tags=["parameters"]
param = 4
# %% outputHidden=false inputHidden=false
import pandas as pd
# %% outputHidden=false inputHidden=false
df = pd.DataFrame({'A': [1, 2], 'B': [3 + param, 4]},
index=pd.Index(['x0', 'x1'], name='x'))
df
# %% outputHidden=false inputHidden=false
%matplotlib inline
df.plot(kind='bar')
|
bin/sam_to_psl.py
|
baraaorabi/flair
| 139 |
141435
|
#!/usr/bin/env python3
import sys, csv, re, os
try:
sam = open(sys.argv[1])
outfilename = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3] == 'quick':
quick = True
chromsizefile = ''
else:
quick = False
chromsizefile = sys.argv[3]
else:
chromsizefile = ''
quick = False
except:
sys.stderr.write('usage: script.py samfile outpsl [chromsizefile|"quick"]\n')
sys.stderr.write('written for minimap sams\n')
sys.exit(1)
if chromsizefile:
chromsizes = {}
for line in open(chromsizefile):
line = line.rstrip().split('\t')
chromsizes[line[0]] = line[1]
with open(outfilename, 'wt') as outfile:
writer = csv.writer(outfile, delimiter='\t', lineterminator=os.linesep)
for line in sam:
if line.startswith('@'):
continue
line = line.split('\t')
tname = line[2]
if tname == '*':
continue
qname, flag, pos, cigar, seq, qual = line[0], int(line[1]), int(line[3]) - 1, line[5], line[9], line[10]
matches = re.findall('([0-9]+)([A-Z])', cigar)
matchlen = mismatches = relstart = qstart = qconsumed = 0
blocksizes, relblockstarts, qstarts = [], [], []
tend = pos
qnuminsert = 0
qbaseinsert = 0
tnuminsert = 0
tbaseinsert = 0 # deletion
qsize_backup = 0
num, op = int(matches[0][0]), matches[0][1]
if op == 'H': # check for H and S at beginning of cigar
looplist = matches[1:]
qstart = num
relstart += num
qsize_backup += num # technically does not consume q but useful when comparing a read's secondary alignments
else:
looplist = matches
num, op = int(looplist[0][0]), looplist[0][1]
if op == 'S':
if not qstart and not matchlen:
qstart = num
qsize_backup += num
looplist = looplist[1:]
else:
looplist = looplist
for m in looplist: # does not check for H and S
num, op = int(m[0]), m[1]
if op == 'M': # consumes reference
blocksizes += [num]
relblockstarts += [relstart]
qstarts += [qconsumed]
relstart += num
matchlen += num
tend += num
qconsumed += num
qsize_backup += num
elif op == 'D': # consumes reference
relstart += num
mismatches += num
tend += num
qnuminsert += num
elif op == 'N': # consumes reference
tend += num
relstart += num
elif quick:
continue
elif op == 'I':
qconsumed += num
tbaseinsert += num
tnuminsert += 1
qsize_backup += num
elif op == 'H' or op == 'S':
break
else:
sys.stderr.write(cigar+'\n')
sys.stderr.write(op + ' unrecognized\n')
sys.exit(1)
blockstarts = ','.join([str(pos + s) for s in relblockstarts]) + ','
blocksizes = ','.join([str(s) for s in blocksizes]) + ','
if quick:
writer.writerow([0, 0, 0, 0, 0, 0, 0, 0, 0, qname, 0, 0, 0, \
tname, 0, 0, 0, 0, blocksizes, 0, blockstarts])
continue
num, op = int(matches[-1][0]), matches[-1][1]
if op == 'H': # check for H and S at the end of cigar
looplist = matches[:-1]
qsize_backup += num # technically does not consume q but useful when comparing a read's secondary alignments
else:
looplist = matches
num, op = int(looplist[-1][0]), looplist[-1][1]
if op == 'S':
qsize_backup += num
blockcount = len(relblockstarts)
relblockstarts = ','.join([str(s) for s in relblockstarts]) + ','
qstarts = ','.join([str(qstart + s) for s in qstarts]) + ','
qend = qconsumed + qstart
ncount = seq.count('N')
qsize = len(seq)
qsize = qsize_backup
if chromsizefile:
tsize = chromsizes[tname] # chromosome length
else:
tsize = 0
tstart = pos
strand = '-' if flag & 0x10 else '+' # flag&0x10 is 1 when the strand is -
mismatches = qbaseinsert = qnuminsert = tnuminsert = tbaseinsert = 0
writer.writerow([matchlen, mismatches, 0, ncount, qnuminsert, qbaseinsert, \
tnuminsert, tbaseinsert, strand, qname, qsize, qstart, qend, \
tname, tsize, tstart, tend, blockcount, blocksizes, qstarts, blockstarts])
|
test/test_mount.py
|
ltmix-web/bottle
| 6,086 |
141440
|
<reponame>ltmix-web/bottle
# -*- coding: utf-8 -*-
import bottle
from .tools import ServerTestBase
from bottle import response
class TestAppMounting(ServerTestBase):
def setUp(self):
ServerTestBase.setUp(self)
self.subapp = bottle.Bottle()
@self.subapp.route('/')
@self.subapp.route('/test/<test>')
def test(test='foo'):
return test
def test_mount_unicode_path_bug602(self):
self.app.mount('/mount/', self.subapp)
self.assertBody('äöü', '/mount/test/äöü')
self.app.route('/route/<param>', callback=lambda param: param)
self.assertBody('äöü', '/route/äöü')
def test_mount_order_bug581(self):
self.app.mount('/test/', self.subapp)
# This should not match
self.app.route('/<test:path>', callback=lambda test: test)
self.assertStatus(200, '/test/')
self.assertBody('foo', '/test/')
def test_mount(self):
self.app.mount('/test/', self.subapp)
self.assertStatus(404, '/')
self.assertStatus(404, '/test')
self.assertStatus(200, '/test/')
self.assertBody('foo', '/test/')
self.assertStatus(200, '/test/test/bar')
self.assertBody('bar', '/test/test/bar')
def test_mount_meta(self):
self.app.mount('/test/', self.subapp)
self.assertEqual(
self.subapp.config['_mount.prefix'], '/test/')
self.assertEqual(
self.subapp.config['_mount.app'], self.app)
def test_no_slash_prefix(self):
self.app.mount('/test', self.subapp)
self.assertStatus(404, '/')
self.assertStatus(200, '/test')
self.assertBody('foo', '/test')
self.assertStatus(200, '/test/')
self.assertBody('foo', '/test/')
self.assertStatus(200, '/test/test/bar')
self.assertBody('bar', '/test/test/bar')
def test_mount_no_plugins(self):
def plugin(func):
def wrapper(*a, **ka):
return 'Plugin'
return wrapper
self.app.install(plugin)
self.app.route('/foo', callback=lambda: 'baz')
self.app.mount('/test/', self.subapp)
self.assertBody('Plugin', '/foo')
self.assertBody('foo', '/test/')
def test_mount_wsgi(self):
status = {}
def app(environ, start_response):
start_response('200 OK', [('X-Test', 'WSGI')])
return 'WSGI ' + environ['PATH_INFO']
self.app.mount('/test', app)
self.assertStatus(200, '/test/')
self.assertBody('WSGI /', '/test')
self.assertBody('WSGI /', '/test/')
self.assertHeader('X-Test', 'WSGI', '/test/')
self.assertBody('WSGI /test/bar', '/test/test/bar')
def test_mount_cookie(self):
@self.subapp.route('/cookie')
def test_cookie():
response.set_cookie('a', 'a')
response.set_cookie('b', 'b')
self.app.mount('/test', self.subapp)
c = self.urlopen('/test/cookie')['header']['Set-Cookie']
self.assertEqual(['a=a', 'b=b'], list(sorted(c.split(', '))))
def test_mount_wsgi_ctype_bug(self):
status = {}
def app(environ, start_response):
start_response('200 OK', [('Content-Type', 'test/test')])
return 'WSGI ' + environ['PATH_INFO']
self.app.mount('/test', app)
self.assertHeader('Content-Type', 'test/test', '/test/')
def test_mount_json_bug(self):
@self.subapp.route('/json')
def test_cookie():
return {'a': 5}
self.app.mount('/test', self.subapp)
self.assertHeader('Content-Type', 'application/json', '/test/json')
def test_mount_get_url(self):
@self.subapp.route('/test', name="test")
def test_cookie():
return self.subapp.get_url("test")
self.app.mount('/test', self.subapp)
self.assertBody('/test/test', '/test/test')
class TestAppMerging(ServerTestBase):
def setUp(self):
ServerTestBase.setUp(self)
self.subapp = bottle.Bottle()
@self.subapp.route('/')
@self.subapp.route('/test/:test')
def test(test='foo'):
return test
def test_merge(self):
self.app.merge(self.subapp)
self.assertStatus(200, '/')
self.assertBody('foo', '/')
self.assertStatus(200, '/test/bar')
self.assertBody('bar', '/test/bar')
|
utils/data_manager.py
|
stu1130/deeplearning-benchmark
| 131 |
141452
|
<reponame>stu1130/deeplearning-benchmark
import os
def getImagenetData(dataset):
if dataset == 'imagenet' or dataset == 'imagenet-256px-q90':
os.system('mkdir -p data')
if not os.path.exists('./data/imagenet1k-train.rec'):
os.system("aws s3 cp s3://imagenet-rec/imagenet1k-train.rec data/")
if not os.path.exists('./data/imagenet1k-val.rec'):
os.system("aws s3 cp s3://imagenet-rec/imagenet1k-val.rec data/")
elif dataset == 'imagenet-480px-q95':
if not os.path.exists(os.path.expanduser('~/data/train-480px-q95.rec')):
os.system("wget -q https://s3.amazonaws.com/aws-ml-platform-datasets/imagenet/480px-q95/train-480px-q95.rec -P ~/data/")
if not os.path.exists(os.path.expanduser('~/data/val-256px-q95.rec')):
os.system("wget -q https://s3.amazonaws.com/aws-ml-platform-datasets/imagenet/256px-q95/val-256px-q95.rec -P ~/data/")
elif dataset == 'imagenet-480px-256px-q95':
if not os.path.exists(os.path.expanduser('~/data/train-480px-q95.rec')):
os.system("wget -q https://s3.amazonaws.com/aws-ml-platform-datasets/imagenet/480px-q95/train-480px-q95.rec -P ~/data/")
if not os.path.exists(os.path.expanduser('~/data/val-256px-q95.rec')):
os.system("wget -q https://s3.amazonaws.com/aws-ml-platform-datasets/imagenet/256px-q95/val-256px-q95.rec -P ~/data/")
if not os.path.exists(os.path.expanduser('~/data/train-256px-q95.rec')):
os.system("wget -q https://s3.amazonaws.com/aws-ml-platform-datasets/imagenet/256px-q95/train-256px-q95.rec -P ~/data/")
elif dataset == 'imagenet-ebs':
if not os.path.exists(os.path.expanduser('~/data/')):
os.system('mkdir -p ~/data/')
os.system("aws s3 sync s3://aws-ml-platform-datasets/imagenet/pass-through/ ~/data/ --exclude \"train*\"")
else:
raise ValueError('Unknown dataset')
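# Minimal usage sketch (assumes AWS credentials / network access for the buckets
# referenced above; the dataset name must be one of the branches handled in
# getImagenetData, otherwise ValueError is raised):
if __name__ == '__main__':
    getImagenetData('imagenet')  # fetches imagenet1k-train.rec / imagenet1k-val.rec into ./data/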
|
backend/app/app/api/api_v1/router/home/__init__.py
|
PY-GZKY/Tplan
| 121 |
141463
|
<reponame>PY-GZKY/Tplan
from .index import router
|
solidity/python/PerformanceExpTestCross.py
|
surzm/contracts-solidity
| 200 |
141480
|
<filename>solidity/python/PerformanceExpTestCross.py
import Web3Wrapper
import InputGenerator
MINIMUM_VALUE_BALANCE = 100
MAXIMUM_VALUE_BALANCE = 10 ** 34
GROWTH_FACTOR_BALANCE = 2.5
MINIMUM_VALUE_WEIGHT = 100000
MAXIMUM_VALUE_WEIGHT = 900000
GROWTH_FACTOR_WEIGHT = 1.5
MINIMUM_VALUE_AMOUNT = 1
MAXIMUM_VALUE_AMOUNT = 10 ** 34
GROWTH_FACTOR_AMOUNT = 2.5
def Main():
rangeBalance1 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, GROWTH_FACTOR_BALANCE)
rangeWeight1 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, GROWTH_FACTOR_WEIGHT)
rangeBalance2 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, GROWTH_FACTOR_BALANCE)
rangeWeight2 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, GROWTH_FACTOR_WEIGHT)
rangeAmount = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_AMOUNT, MAXIMUM_VALUE_AMOUNT, GROWTH_FACTOR_AMOUNT)
testNum = 0
numOfTests = len(rangeBalance1) * len(rangeWeight1) * len(rangeBalance2) * len(rangeWeight2) * len(rangeAmount)
FormulaContract = Web3Wrapper.Contract('BancorFormula')
FormulaContract.setter().init()
FormulaContractAddr = FormulaContract.tester()
minGas = float('+inf')
maxGas = float('-inf')
totalGas = 0
countGas = 0
for balance1 in rangeBalance1:
for weight1 in rangeWeight1:
for balance2 in rangeBalance2:
for weight2 in rangeWeight2:
for amount in rangeAmount:
testNum += 1
if True:
try:
gas = FormulaContractAddr.crossReserveTargetAmount(balance1, weight1, balance2, weight2, amount)
minGas = min(minGas, gas)
maxGas = max(maxGas, gas)
totalGas += gas
countGas += 1
print('Test {} out of {}: gas = {}, minimum = {}, maximum = {}, average = {}'.format(testNum, numOfTests, gas, minGas, maxGas, totalGas // countGas))
except:
pass
Main()
|
audiomate/annotations/label.py
|
CostanzoPablo/audiomate
| 133 |
141481
|
<reponame>CostanzoPablo/audiomate<gh_stars>100-1000
import copy
from functools import total_ordering
@total_ordering
class Label:
"""
Represents a label that describes some part of an utterance.
Args:
value (str): The text of the label.
start (float): Start of the label within the utterance in seconds.
(default: 0)
end (float): End of the label within the utterance in seconds.
(default: inf) (inf defines the end of the utterance)
meta (dict): A dictionary containing additional information
for the label.
Attributes:
        label_list (LabelList): The label-list this label belongs to.
"""
__slots__ = ['value', 'start', 'end', 'label_list', 'meta']
def __init__(self, value, start=0, end=float('inf'), meta=None):
self.value = value
self.start = start
self.end = end
self.meta = meta or {}
self.label_list = None
def __eq__(self, other):
return (
self.start == other.start and
self.end == other.end and
self.value.lower() == other.value.lower()
)
def __lt__(self, other):
data_this = (self.start, self.end, self.value.lower())
data_other = (other.start, other.end, other.value.lower())
return data_this < data_other
def __repr__(self) -> str:
return 'Label({}, {}, {})'.format(self.value, self.start, self.end)
def __copy__(self):
return Label(
self.value,
start=self.start,
end=self.end,
meta=self.meta
)
def __deepcopy__(self, memo):
return Label(
self.value,
start=self.start,
end=self.end,
meta=copy.deepcopy(self.meta, memo)
)
@property
def start_abs(self):
"""
Return the absolute start of the label in seconds relative to
the signal. If the label isn't linked to any utterance via label-list,
it is assumed ``self.start`` is relative to the start of the signal,
hence ``self.start`` == ``self.start_abs``.
"""
if self.label_list is None or self.label_list.utterance is None:
return self.start
return self.label_list.utterance.start + self.start
@property
def end_abs(self):
"""
Return the absolute end of the label in seconds relative to the signal.
If the label isn't linked to any utterance via label-list,
it is assumed ``self.end`` is relative to the start of the signal,
hence ``self.end`` == ``self.end_abs``.
"""
if self.label_list is None or self.label_list.utterance is None:
return self.end
elif self.end == float('inf'):
return self.label_list.utterance.end_abs
else:
return self.end + self.label_list.utterance.start
@property
def duration(self):
""" Return the duration of the label in seconds. """
return self.end_abs - self.start_abs
@property
def length(self):
""" Return the length of the label (Number of characters). """
return len(self.value)
def read_samples(self, sr=None):
"""
Read the samples of the utterance.
Args:
sr (int): If None uses the sampling rate given by the track,
otherwise resamples to the given sampling rate.
Returns:
np.ndarray: A numpy array containing the samples as a
floating point (numpy.float32) time series.
"""
duration = None
if self.end != float('inf') or self.label_list.utterance.end >= 0:
duration = self.duration
track = self.label_list.utterance.track
return track.read_samples(
sr=sr,
offset=self.start_abs,
duration=duration
)
def tokenized(self, delimiter=' '):
"""
Return a list with tokens from the value of the label.
Tokens are extracted by splitting the string using ``delimiter`` and
        then trimming any whitespace before and after the split strings.
Args:
delimiter (str): The delimiter used to split into tokens.
(default: space)
Return:
list: A list of tokens in the order they occur in the label.
Examples:
>>> label = Label('as is oh')
>>> label.tokenized()
['as', 'is', 'oh']
Using a different delimiter (whitespace is trimmed anyway):
>>> label = Label('oh hi, as, is ')
>>> label.tokenized(delimiter=',')
['oh hi', 'as', 'is']
"""
tokens = self.value.split(sep=delimiter)
tokens = [t.strip() for t in tokens]
while '' in tokens:
tokens.remove('')
return tokens
def do_overlap(self, other_label, adjacent=True):
"""
Determine whether ``other_label`` overlaps with this label.
If ``adjacent==True``, adjacent labels are also considered as overlapping.
Args:
other_label (Label): Another label.
adjacent (bool): If ``True``, adjacent labels are
considered as overlapping.
Returns:
bool: ``True`` if the two labels overlap, ``False`` otherwise.
"""
this_end = self.end
other_end = other_label.end
if this_end == float('inf'):
this_end = self.end_abs
if other_end == float('inf'):
other_end = other_label.end_abs
if adjacent:
first_ends_before = this_end < other_label.start
second_ends_before = other_end < self.start
else:
first_ends_before = this_end <= other_label.start
second_ends_before = other_end <= self.start
return not (first_ends_before or second_ends_before)
def overlap_duration(self, other_label):
"""
Return the duration of the overlapping part between this label
and ``other_label``.
Args:
other_label(Label): Another label to check.
Return:
float: The duration of overlap in seconds.
Example:
>>> label_a = Label('a', 3.4, 5.6)
>>> label_b = Label('b', 4.8, 6.2)
>>> label_a.overlap_duration(label_b)
0.8
"""
this_end = self.end
other_end = other_label.end
if this_end == float('inf'):
this_end = self.end_abs
if other_end == float('inf'):
other_end = other_label.end_abs
start_overlap = max(self.start, other_label.start)
end_overlap = min(this_end, other_end)
return max(0, end_overlap - start_overlap)
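# Minimal usage sketch for the Label class above (values are illustrative only):
if __name__ == '__main__':
    a = Label('speech', start=1.0, end=4.0)
    b = Label('noise', start=3.5, end=6.0)
    assert a.do_overlap(b)                            # [1.0, 4.0] and [3.5, 6.0] overlap
    assert abs(a.overlap_duration(b) - 0.5) < 1e-9    # overlapping part is 3.5..4.0
    assert a.tokenized() == ['speech']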
|
vega/quota/quota_affinity.py
|
This-50m/vega
| 724 |
141485
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Quota for Affinity."""
import logging
import pandas as pd
from sklearn import ensemble
class AffinityModelBase(object):
"""Base class for affinity regression."""
def __init__(self, affinity_file, affinity_value):
self.affinity_report = pd.read_csv(affinity_file)
self.standard = affinity_value
self.ml_model = ensemble.RandomForestClassifier(n_estimators=20)
def build_model(self):
"""Build regression model."""
desc = self.affinity_report['desc']
inputs = self.generate_input_space(desc)
labels = self.generate_label()
self.ml_model.fit(inputs, labels)
def predict(self, input):
"""Predict output from input."""
return self.ml_model.predict(input[:1])[0]
def generate_input_space(self, desc):
"""Generate input space from desc."""
if desc is None:
return None
space_list = []
for idx in range(len(desc)):
desc_item = eval(desc.iloc[idx])
space_dict = {}
self.init_space_dict(space_dict)
for key, value in desc_item.items():
self.get_space_dict(key, value, space_dict)
# space_dict[_metric_key] = eval(pfms.iloc[idx])[_metric_key]
if space_dict:
space_list.append(space_dict)
return pd.DataFrame(space_list)
def generate_label(self):
"""Generate label from affinity report."""
_pfms = self.affinity_report['performance']
_metric_key = eval(self.affinity_report['_objective_keys'][0])[0]
label_list = []
for pfm in _pfms:
value = eval(pfm)[_metric_key]
clc = 1 if value > self.standard else 0
label_list.append({_metric_key: clc})
return pd.DataFrame(label_list)
def init_space_dict(self, space_dict):
"""Initialize space dict."""
pass
def get_space_dict(self, *args):
"""Get space dict from desc."""
raise NotImplementedError
class AffinityModelSrea(AffinityModelBase):
"""Affinity Regression for SR-EA."""
def __init__(self, affinity_file, affinity_value):
super(AffinityModelSrea, self).__init__(affinity_file, affinity_value)
def init_space_dict(self, space_dict):
"""Initialize space dict."""
for i in range(80):
space_dict['code_{}'.format(i)] = False
def get_space_dict(self, key, value, space_dict):
"""Get space dict from desc."""
key = key.split('.')[-1]
if isinstance(value, dict):
for sub_key, sub_value in value.items():
self.get_space_dict(sub_key, sub_value, space_dict)
elif key == 'code' and isinstance(value, str):
for i, element in enumerate(value):
if element == '0':
space_dict[key + '_{}'.format(i)] = 1
elif element == '1':
space_dict[key + '_{}'.format(i)] = 2
else:
space_dict[key + '_{}'.format(i)] = 3
class QuotaAffinity(object):
"""Generate affinity model of search space, filter bad sample."""
def __init__(self, affinity_cfg):
affinity_class = eval(self.get_affinity_model(affinity_cfg.type))
self.affinity_model = affinity_class(affinity_cfg.affinity_file, affinity_cfg.affinity_value)
self.affinity_model.build_model()
def get_affinity_model(self, affinity_type):
"""Get specific affinity model name."""
affinity_model_dict = {
'sr_ea': 'AffinityModelSrea'
}
return affinity_model_dict[affinity_type]
def is_affinity(self, desc):
"""Judge the desc is affinity or not."""
desc_dict = {'desc': str(desc)}
input = pd.DataFrame([desc_dict])
input = self.affinity_model.generate_input_space(input['desc'])
try:
result = self.affinity_model.predict(input)
except Exception:
logging.info('The sampled desc is not affinity')
return False
if result == 1:
logging.info('The sampled desc is affinity')
return True
else:
logging.info('The sampled desc is not affinity')
return False
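# Hypothetical usage sketch: the config fields mirror those read in __init__ above,
# and the CSV path is illustrative only. The file is expected to provide the
# 'desc', 'performance' and '_objective_keys' columns assumed by AffinityModelBase.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(type='sr_ea',
                          affinity_file='./affinity_report.csv',  # hypothetical path
                          affinity_value=0.75)
    quota = QuotaAffinity(cfg)
    logging.info(quota.is_affinity({'backbone.code': '0121'}))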
|
server/src/protobuf/protobuf-2.6.1/python/google/protobuf/internal/message_test.py
|
Steven0917/TT
| 2,151 |
141492
|
<reponame>Steven0917/TT<filename>server/src/protobuf/protobuf-2.6.1/python/google/protobuf/internal/message_test.py
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = '<EMAIL> (<NAME>)'
import copy
import math
import operator
import pickle
import sys
from google.apputils import basetest
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
def IsPosInf(val):
return isinf(val) and (val > 0)
def IsNegInf(val):
return isinf(val) and (val < 0)
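# Sanity notes for the helpers above (plain IEEE-754 float semantics):
#   isnan(float('nan'))     -> True    isnan(1.0)               -> False
#   isinf(float('inf'))     -> True    isinf(1.0)               -> False
#   IsPosInf(float('inf'))  -> True    IsNegInf(float('-inf'))  -> True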
class MessageTest(basetest.TestCase):
def testBadUtf8String(self):
if api_implementation.Type() != 'python':
self.skipTest("Skipping testBadUtf8String, currently only the python "
"api implementation raises UnicodeDecodeError when a "
"string field contains bad utf-8.")
bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
with self.assertRaises(UnicodeDecodeError) as context:
unittest_pb2.TestAllTypes.FromString(bad_utf8_data)
self.assertIn('field: protobuf_unittest.TestAllTypes.optional_string',
str(context.exception))
def testGoldenMessage(self):
golden_data = test_util.GoldenFileData(
'golden_message_oneof_implemented')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenExtensions(self):
golden_data = test_util.GoldenFileData('golden_message')
golden_message = unittest_pb2.TestAllExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self):
golden_data = test_util.GoldenFileData('golden_packed_fields_message')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedExtensions(self):
golden_data = test_util.GoldenFileData('golden_packed_fields_message')
golden_message = unittest_pb2.TestPackedExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testPickleSupport(self):
golden_data = test_util.GoldenFileData('golden_message')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
def testPickleIncompleteProto(self):
golden_message = unittest_pb2.TestRequired(a=1)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
self.assertEquals(unpickled_message.a, 1)
# This is still an incomplete proto - so serializing should fail
self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)
def testPositiveInfinity(self):
golden_data = (b'\x5D\x00\x00\x80\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
b'\xCD\x02\x00\x00\x80\x7F'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
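    # Decoding the hand-built wire data above: tag 0x5D is optional_float (field 11,
    # 32-bit), 0x61 is optional_double (field 12, 64-bit), 0xCD 0x02 is
    # repeated_float (field 41) and 0xD1 0x02 is repeated_double (field 42); each
    # payload is an IEEE-754 positive infinity.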
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self):
golden_data = (b'\x5D\x00\x00\x80\xFF'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
b'\xCD\x02\x00\x00\x80\xFF'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self):
golden_data = (b'\x5D\x00\x00\xC0\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
b'\xCD\x02\x00\x00\xC0\x7F'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
# The protocol buffer may serialize to any one of multiple different
# representations of a NaN. Rather than verify a specific representation,
# verify the serialized string can be converted into a correctly
# behaving protocol buffer.
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestAllTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.optional_float))
self.assertTrue(isnan(message.optional_double))
self.assertTrue(isnan(message.repeated_float[0]))
self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self):
golden_data = (b'\xA2\x06\x04\x00\x00\x80\x7F'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self):
golden_data = (b'\xA2\x06\x04\x00\x00\x80\xFF'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self):
golden_data = (b'\xA2\x06\x04\x00\x00\xC0\x7F'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.packed_float[0]))
self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeDoubleValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testFloatPrinting(self):
message = unittest_pb2.TestAllTypes()
message.optional_float = 2.0
self.assertEqual(str(message), 'optional_float: 2.0\n')
def testHighPrecisionFloatPrinting(self):
message = unittest_pb2.TestAllTypes()
message.optional_double = 0.12345678912345678
if sys.version_info.major >= 3:
self.assertEqual(str(message), 'optional_double: 0.12345678912345678\n')
else:
self.assertEqual(str(message), 'optional_double: 0.123456789123\n')
def testUnknownFieldPrinting(self):
populated = unittest_pb2.TestAllTypes()
test_util.SetAllNonLazyFields(populated)
empty = unittest_pb2.TestEmptyMessage()
empty.ParseFromString(populated.SerializeToString())
self.assertEqual(str(empty), '')
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append(b'a')
message.repeated_bytes.append(b'c')
message.repeated_bytes.append(b'b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], b'a')
self.assertEqual(message.repeated_bytes[1], b'b')
self.assertEqual(message.repeated_bytes[2], b'c')
def testSortingRepeatedScalarFieldsCustomComparator(self):
"""Check some different types with custom comparator."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self):
"""Check passing a custom comparator to sort a repeated composite field."""
message = unittest_pb2.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=operator.attrgetter('bb'))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
def testRepeatedCompositeFieldSortArguments(self):
"""Check sorting a repeated composite field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
get_bb = operator.attrgetter('bb')
cmp_bb = lambda a, b: cmp(a.bb, b.bb)
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=get_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(key=get_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
if sys.version_info.major >= 3: return # No cmp sorting in PY3.
message.repeated_nested_message.sort(sort_function=cmp_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self):
"""Check sorting a scalar field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(key=abs, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
if sys.version_info.major < 3: # No cmp sorting in PY3.
abs_cmp = lambda a, b: cmp(abs(a), abs(b))
message.repeated_int32.sort(sort_function=abs_cmp)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(key=len, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
if sys.version_info.major < 3: # No cmp sorting in PY3.
len_cmp = lambda a, b: cmp(len(a), len(b))
message.repeated_string.sort(sort_function=len_cmp)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(cmp=len_cmp, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testRepeatedFieldsComparable(self):
m1 = unittest_pb2.TestAllTypes()
m2 = unittest_pb2.TestAllTypes()
m1.repeated_int32.append(0)
m1.repeated_int32.append(1)
m1.repeated_int32.append(2)
m2.repeated_int32.append(0)
m2.repeated_int32.append(1)
m2.repeated_int32.append(2)
m1.repeated_nested_message.add().bb = 1
m1.repeated_nested_message.add().bb = 2
m1.repeated_nested_message.add().bb = 3
m2.repeated_nested_message.add().bb = 1
m2.repeated_nested_message.add().bb = 2
m2.repeated_nested_message.add().bb = 3
if sys.version_info.major >= 3: return # No cmp() in PY3.
# These comparisons should not raise errors.
_ = m1 < m2
_ = m1.repeated_nested_message < m2.repeated_nested_message
# Make sure cmp always works. If it wasn't defined, these would be
# id() comparisons and would all fail.
self.assertEqual(cmp(m1, m2), 0)
self.assertEqual(cmp(m1.repeated_int32, m2.repeated_int32), 0)
self.assertEqual(cmp(m1.repeated_int32, [0, 1, 2]), 0)
self.assertEqual(cmp(m1.repeated_nested_message,
m2.repeated_nested_message), 0)
with self.assertRaises(TypeError):
# Can't compare repeated composite containers to lists.
cmp(m1.repeated_nested_message, m2.repeated_nested_message[:])
# TODO(anuraag): Implement extensiondict comparison in C++ and then add test
def testParsingMerge(self):
"""Check the merge behavior when a required or optional field appears
multiple times in the input."""
messages = [
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes() ]
messages[0].optional_int32 = 1
messages[1].optional_int64 = 2
messages[2].optional_int32 = 3
messages[2].optional_string = 'hello'
merged_message = unittest_pb2.TestAllTypes()
merged_message.optional_int32 = 3
merged_message.optional_int64 = 2
merged_message.optional_string = 'hello'
generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
generator.field1.extend(messages)
generator.field2.extend(messages)
generator.field3.extend(messages)
generator.ext1.extend(messages)
generator.ext2.extend(messages)
generator.group1.add().field1.MergeFrom(messages[0])
generator.group1.add().field1.MergeFrom(messages[1])
generator.group1.add().field1.MergeFrom(messages[2])
generator.group2.add().field1.MergeFrom(messages[0])
generator.group2.add().field1.MergeFrom(messages[1])
generator.group2.add().field1.MergeFrom(messages[2])
data = generator.SerializeToString()
parsing_merge = unittest_pb2.TestParsingMerge()
parsing_merge.ParseFromString(data)
# Required and optional fields should be merged.
self.assertEqual(parsing_merge.required_all_types, merged_message)
self.assertEqual(parsing_merge.optional_all_types, merged_message)
self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
merged_message)
self.assertEqual(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.optional_ext],
merged_message)
# Repeated fields should not be merged.
self.assertEqual(len(parsing_merge.repeated_all_types), 3)
self.assertEqual(len(parsing_merge.repeatedgroup), 3)
self.assertEqual(len(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.repeated_ext]), 3)
def ensureNestedMessageExists(self, msg, attribute):
"""Make sure that a nested message object exists.
As soon as a nested message attribute is accessed, it will be present in the
_fields dict, without being marked as actually being set.
"""
getattr(msg, attribute)
self.assertFalse(msg.HasField(attribute))
def testOneofGetCaseNonexistingField(self):
m = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, m.WhichOneof, 'no_such_oneof_field')
def testOneofSemantics(self):
m = unittest_pb2.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
m.oneof_uint32 = 11
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
m.oneof_string = u'foo'
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertTrue(m.HasField('oneof_string'))
m.oneof_nested_message.bb = 11
self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_string'))
self.assertTrue(m.HasField('oneof_nested_message'))
m.oneof_bytes = b'bb'
self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_nested_message'))
self.assertTrue(m.HasField('oneof_bytes'))
def testOneofCompositeFieldReadAccess(self):
m = unittest_pb2.TestAllTypes()
m.oneof_uint32 = 11
self.ensureNestedMessageExists(m, 'oneof_nested_message')
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertEqual(11, m.oneof_uint32)
def testOneofHasField(self):
m = unittest_pb2.TestAllTypes()
self.assertFalse(m.HasField('oneof_field'))
m.oneof_uint32 = 11
self.assertTrue(m.HasField('oneof_field'))
m.oneof_bytes = b'bb'
self.assertTrue(m.HasField('oneof_field'))
m.ClearField('oneof_bytes')
self.assertFalse(m.HasField('oneof_field'))
def testOneofClearField(self):
m = unittest_pb2.TestAllTypes()
m.oneof_uint32 = 11
m.ClearField('oneof_field')
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearSetField(self):
m = unittest_pb2.TestAllTypes()
m.oneof_uint32 = 11
m.ClearField('oneof_uint32')
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearUnsetField(self):
m = unittest_pb2.TestAllTypes()
m.oneof_uint32 = 11
self.ensureNestedMessageExists(m, 'oneof_nested_message')
m.ClearField('oneof_nested_message')
self.assertEqual(11, m.oneof_uint32)
self.assertTrue(m.HasField('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
def testOneofDeserialize(self):
m = unittest_pb2.TestAllTypes()
m.oneof_uint32 = 11
m2 = unittest_pb2.TestAllTypes()
m2.ParseFromString(m.SerializeToString())
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testSortEmptyRepeatedCompositeContainer(self):
"""Exercise a scenario that has led to segfaults in the past.
"""
m = unittest_pb2.TestAllTypes()
m.repeated_nested_message.sort()
def testHasFieldOnRepeatedField(self):
"""Using HasField on a repeated field should raise an exception.
"""
m = unittest_pb2.TestAllTypes()
with self.assertRaises(ValueError) as _:
m.HasField('repeated_int32')
class ValidTypeNamesTest(basetest.TestCase):
def assertImportFromName(self, msg, base_name):
    # Parse <type 'module.class_name'> to extract 'some.name' as a string.
tp_name = str(type(msg)).split("'")[1]
valid_names = ('Repeated%sContainer' % base_name,
'Repeated%sFieldContainer' % base_name)
self.assertTrue(any(tp_name.endswith(v) for v in valid_names),
                    '%r does not end with any of %r' % (tp_name, valid_names))
parts = tp_name.split('.')
class_name = parts[-1]
module_name = '.'.join(parts[:-1])
__import__(module_name, fromlist=[class_name])
def testTypeNamesCanBeImported(self):
# If import doesn't work, pickling won't work either.
pb = unittest_pb2.TestAllTypes()
self.assertImportFromName(pb.repeated_int32, 'Scalar')
self.assertImportFromName(pb.repeated_nested_message, 'Composite')
if __name__ == '__main__':
basetest.main()
|
tensorflow/python/ops/control_flow_v2_func_graphs.py
|
EricRemmerswaal/tensorflow
| 190,993 |
141501
|
<reponame>EricRemmerswaal/tensorflow<gh_stars>1000+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraphs for V2 control flow."""
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
class ControlFlowFuncGraph(func_graph.FuncGraph):
"""Contains control flow-specific FuncGraph logic."""
def __init__(self, *args, **kwargs):
super(ControlFlowFuncGraph, self).__init__(*args, **kwargs)
outer_graph = self.outer_graph
# Unlike tf.function, control flow FuncGraphs are generally created one per
# op. This means hard-coding any outer device scopes in the body (rather
# than inspecting the call-time placement of the control flow op) makes
# sense.
self._device_function_stack = outer_graph._device_function_stack.copy() # pylint: disable=protected-access
self.is_control_flow_graph = True
if ops.executing_eagerly_outside_functions():
func_graph.override_func_graph_name_scope(
self, self.outer_graph.get_name_scope())
class CondBranchFuncGraph(ControlFlowFuncGraph):
"""FuncGraph for branches of tf.cond().
This is used to distinguish cond branches from other functions.
"""
class WhileCondFuncGraph(ControlFlowFuncGraph):
"""FuncGraph for the condition of tf.while_loop().
This is used to distinguish while conditions from other functions.
"""
class WhileBodyFuncGraph(ControlFlowFuncGraph):
"""FuncGraph for the body of tf.while_loop().
This is used to distinguish while bodies from other functions.
"""
|
jenkins_job_wrecker/__init__.py
|
chenhuahuan/jenkins-job-wrecker
| 210 |
141507
|
__version__ = '1.7.1'
|
tests/test_socfaker_timestamp.py
|
priamai/soc-faker
| 122 |
141517
|
<reponame>priamai/soc-faker
def test_socfaker_timestamp_in_the_past(socfaker_fixture):
assert socfaker_fixture.timestamp.in_the_past()
def test_socfaker_timestamp_in_the_future(socfaker_fixture):
assert socfaker_fixture.timestamp.in_the_future()
def test_socfaker_timestamp_current(socfaker_fixture):
assert socfaker_fixture.timestamp.current
def test_socfaker_timestamp_date_string(socfaker_fixture):
assert socfaker_fixture.timestamp.date_string
|
xfel/cxi/cspad_ana/xes_faster_histograms.py
|
dperl-sol/cctbx_project
| 155 |
141528
|
"""Specialized version of xes_histograms.
1) no support for background region of interest
2) photon_counting method only; uses integrated area under 1-photon Gaussian
3) no support for >1 photon
4) no multiprocessing
5) no output of gain map; no input gain correction
6) Fixed constraints for ratio of peak widths 1-photon: 0-photon
7) Fixed constraints for ratio of 1-photon gain: 0-photon sigma
8) 60-fold speed improvement over xes_histograms.py; takes 7 seconds.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import math
from libtbx import easy_pickle
import iotbx.phil
from scitbx.array_family import flex
import scitbx.math
from xfel.command_line import view_pixel_histograms
from xfel.cxi.cspad_ana import cspad_tbx
from xfel.cxi.cspad_ana import xes_finalise
from scitbx.lstbx import normal_eqns
from scitbx.lstbx import normal_eqns_solving
from scitbx.math import curve_fitting
from xfel.cxi.cspad_ana.xes_histograms import master_phil_str
from six.moves import range
master_phil_str = master_phil_str + """
xes {
fudge_factor {
gain_to_sigma = 6.75
.type = float
.help = On the assumption that one-mean is zero_mean + zero_sigma * gain_to_sigma
.help = with gain_to_sigma being a constant for the pixel array detector.
.help = approx 6.75 for LB67 r0100
.help = manually optimized for LG36: r0025,6.0 / r0080,5.8 (MnCl2) / r0137,5.9 (PSII solution)
}
fit_limits = (20,150)
.type = ints(size=2)
.help = x-Limits for histogram fitting, relative to the presumably -50 ADU histogram origin
.help = 20,150 used for LG36
}
"""
Usage = """cctbx.python xes_faster_histograms.py output_dirname=[outdir] \
roi=0:388,99:126 [datadir]/hist_r0[run_no].pickle run=[run_no] fudge_factor.gain_to_sigma=5.9
...converts histogram.pickle file (described elsewhere) into spectrum by fitting
0- and 1-photon Gaussians to histograms representing each pixel on the XES spectrometer."""
def run(args):
if len(args)==0: print(Usage); exit()
processed = iotbx.phil.process_command_line(
args=args, master_string=master_phil_str)
args = processed.remaining_args
work_params = processed.work.extract().xes
processed.work.show()
assert len(args) == 1
output_dirname = work_params.output_dirname
roi = cspad_tbx.getOptROI(work_params.roi)
bg_roi = cspad_tbx.getOptROI(work_params.bg_roi)
gain_map_path = work_params.gain_map
estimated_gain = work_params.estimated_gain
print(output_dirname)
if output_dirname is None:
output_dirname = os.path.join(os.path.dirname(args[0]), "finalise")
print(output_dirname)
hist_d = easy_pickle.load(args[0])
if len(hist_d)==2:
hist_d = hist_d['histogram']
pixel_histograms = faster_methods_for_pixel_histograms(
hist_d, work_params)
result = xes_from_histograms(
pixel_histograms, output_dirname=output_dirname,
gain_map_path=gain_map_path, estimated_gain=estimated_gain,
roi=roi, run=work_params.run)
class xes_from_histograms(object):
def __init__(self, pixel_histograms, output_dirname=".", gain_map_path=None,
gain_map=None, estimated_gain=30,roi=None,run=None):
self.sum_img = flex.double(flex.grid(370,391), 0) # XXX define the image size some other way?
gain_img = flex.double(self.sum_img.accessor(), 0)
assert [gain_map, gain_map_path].count(None) > 0
if gain_map_path is not None:
d = easy_pickle.load(gain_map_path)
gain_map = d["DATA"]
mask = flex.int(self.sum_img.accessor(), 0)
start_row = 370
end_row = 0
print(len(pixel_histograms.histograms))
pixels = list(pixel_histograms.pixels())
n_pixels = len(pixels)
if roi is not None:
for k, (i, j) in enumerate(reversed(pixels)):
if ( i < roi[2]
or i > roi[3]
or j < roi[0]
or j > roi[1]):
del pixels[n_pixels-k-1]
if gain_map is None:
fixed_func = pixel_histograms.fit_one_histogram
else:
def fixed_func(pixel):
return pixel_histograms.fit_one_histogram(pixel, n_gaussians=1)
chi_squared_list=flex.double()
for i, pixel in enumerate(pixels):
#print i,pixel
LEG = False
start_row = min(start_row, pixel[0])
end_row = max(end_row, pixel[0])
n_photons = 0
try:
if LEG: gaussians, two_photon_flag = pixel_histograms.fit_one_histogram(pixel)
alt_gaussians = pixel_histograms.fit_one_histogram_two_gaussians(pixel)
except ZeroDivisionError:
print("HEY DIVIDE BY ZERO")
#pixel_histograms.plot_combo(pixel, gaussians)
mask[pixel] = 1
continue
except RuntimeError as e:
print("Error fitting pixel %s" %str(pixel))
print(str(e))
mask[pixel] = 1
continue
hist = pixel_histograms.histograms[pixel]
if not LEG:
gs = alt_gaussians[1].params
fit_photons = gs[0] * gs[2] * math.sqrt(2.*math.pi)
n_photons = int(round(fit_photons,0))
fit_interpretation=pixel_histograms.multiphoton_and_fit_residual(
pixel_histograms.histograms[pixel], alt_gaussians)
multi_photons = fit_interpretation.get_multiphoton_count()
total_photons = n_photons + multi_photons
if False and n_photons< 0: # Generally, do not mask negative values; if fit is still OK
print("\n%d pixel %s altrn %d photons from curvefitting"%( i,pixel,n_photons ))
pixel_histograms.plot_combo(pixel, alt_gaussians,
interpretation=fit_interpretation)
mask[pixel]=1 # do not mask out negative pixels if the Gaussian fit is good
continue
chi_squared_list.append(fit_interpretation.chi_squared())
suspect = False # don't know the optimal statistical test. Histograms vary primarily by total count & # photons
if total_photons <= 3:
if fit_interpretation.chi_squared() > 2.5 or fit_interpretation.quality_factor < 5: suspect=True
elif 3 < total_photons <= 10:
if fit_interpretation.chi_squared() > 5 or fit_interpretation.quality_factor < 10: suspect=True
elif 10 < total_photons <= 33:
if fit_interpretation.chi_squared() > 10 or fit_interpretation.quality_factor < 20: suspect=True
elif 33 < total_photons <= 100:
if fit_interpretation.chi_squared() > 20 or fit_interpretation.quality_factor < 20: suspect=True
elif 100 < total_photons <= 330:
if fit_interpretation.chi_squared() > 30 or fit_interpretation.quality_factor < 25: suspect=True
elif 330 < total_photons <= 1000:
if fit_interpretation.chi_squared() > 40 or fit_interpretation.quality_factor < 30: suspect=True
elif 1000 < total_photons:
if fit_interpretation.chi_squared() > 50 or fit_interpretation.quality_factor < 30: suspect=True
if suspect:
print("\n%d pixel %s Bad quality 0/1-photon fit"%(i,pixel),fit_interpretation.quality_factor)
print(" with chi-squared %10.5f"%fit_interpretation.chi_squared())
print(" Suspect",suspect)
print("%d fit photons, %d total photons"%(n_photons,total_photons))
#pixel_histograms.plot_combo(pixel, alt_gaussians,
# interpretation=fit_interpretation)
mask[pixel]=1
continue
self.sum_img[pixel] = n_photons + multi_photons
mask.set_selected(self.sum_img == 0, 1)
unbound_pixel_mask = xes_finalise.cspad_unbound_pixel_mask()
mask.set_selected(unbound_pixel_mask > 0, 1)
bad_pixel_mask = xes_finalise.cspad2x2_bad_pixel_mask_cxi_run7()
mask.set_selected(bad_pixel_mask > 0, 1)
for row in range(self.sum_img.all()[0]):
self.sum_img[row:row+1,:].count(0)
spectrum_focus = self.sum_img[start_row:end_row,:]
mask_focus = mask[start_row:end_row,:]
spectrum_focus.set_selected(mask_focus > 0, 0)
xes_finalise.filter_outlying_pixels(spectrum_focus, mask_focus)
print("Number of rows: %i" %spectrum_focus.all()[0])
print("Estimated no. photons counted: %i" %flex.sum(spectrum_focus))
print("Number of images used: %i" %flex.sum(
      list(pixel_histograms.histograms.values())[0].slots()))
d = cspad_tbx.dpack(
address='CxiSc1-0|Cspad2x2-0',
data=spectrum_focus,
distance=1,
ccd_image_saturation=2e8, # XXX
)
if run is not None: runstr="_%04d"%run
else: runstr=""
cspad_tbx.dwritef(d, output_dirname, 'sum%s_'%runstr)
plot_x, plot_y = xes_finalise.output_spectrum(
spectrum_focus.iround(), mask_focus=mask_focus,
output_dirname=output_dirname, run=run)
self.spectrum = (plot_x, plot_y)
self.spectrum_focus = spectrum_focus
xes_finalise.output_matlab_form(spectrum_focus, "%s/sum%s.m" %(output_dirname,runstr))
print(output_dirname)
print("Average chi squared is",flex.mean(chi_squared_list),"on %d shots"%flex.sum(hist.slots()))
SIGMAFAC = 1.15
class faster_methods_for_pixel_histograms(view_pixel_histograms.pixel_histograms):
def __init__(self,hist_dict,work_params):
self.work_params = work_params
super(faster_methods_for_pixel_histograms,self
).__init__(hist_dict,work_params.estimated_gain)
def plot_combo(self, pixel, gaussians,
window_title=None, title=None,
log_scale=False, normalise=False, save_image=False, interpretation=None):
histogram = self.histograms[pixel]
from matplotlib import pyplot
from xfel.command_line.view_pixel_histograms import hist_outline
slots = histogram.slots().as_double()
if normalise:
normalisation = (flex.sum(slots) + histogram.n_out_of_slot_range()) / 1e5
print("normalising by factor: ", normalisation)
slots /= normalisation
bins, data = hist_outline(histogram)
if log_scale:
data.set_selected(data == 0, 0.1) # otherwise lines don't get drawn when we have some empty bins
pyplot.yscale("log")
pyplot.plot(bins, data, '-k', linewidth=2)
pyplot.plot(bins, data/1000., '-k', linewidth=2)
pyplot.suptitle(title)
data_min = min([slot.low_cutoff for slot in histogram.slot_infos() if slot.n > 0])
data_max = max([slot.low_cutoff for slot in histogram.slot_infos() if slot.n > 0])
pyplot.xlim(data_min, data_max+histogram.slot_width())
pyplot.xlim(-50, 100)
pyplot.ylim(-10, 40)
x = histogram.slot_centers()
for g in gaussians:
print("Height %7.2f mean %4.1f sigma %3.1f"%(g.params))
pyplot.plot(x, g(x), linewidth=2)
if interpretation is not None:
interpretation.plot_multiphoton_fit(pyplot)
interpretation.plot_quality(pyplot)
pyplot.show()
@staticmethod
def multiphoton_and_fit_residual(histogram,gaussians):
class per_pixel_analysis:
def __init__(OO):
#OK let's figure stuff out about the multiphoton residual, after fitting with 0 + 1 photons
# only count the residual for x larger than one_mean + 3*zero_sigma
x = histogram.slot_centers()
y_calc = flex.double(x.size(), 0)
for g in gaussians:
y_calc += g(x)
xfloor = gaussians[1].params[1] + 3.*gaussians[0].params[2]
selection = (histogram.slot_centers()>xfloor)
OO.fit_xresid = histogram.slot_centers().select(selection)
OO.fit_yresid = histogram.slots().as_double().select(selection) - y_calc.select(selection)
OO.xweight = (OO.fit_xresid - gaussians[0].params[1])/(gaussians[1].params[1] - gaussians[0].params[1])
OO.additional_photons = flex.sum( OO.xweight * OO.fit_yresid )
#Now the other half of the data; the part supposedly fit by the 0- and 1-photon gaussians
OO.qual_xresid = histogram.slot_centers().select(~selection)
ysignal = histogram.slots().as_double().select(~selection)
OO.qual_yresid = ysignal - y_calc.select(~selection)
# Not sure how to treat weights for channels with zero observations; default to 1
_variance = ysignal.deep_copy().set_selected(ysignal==0., 1.)
OO.weight = 1./_variance
OO.weighted_numerator = OO.weight * (OO.qual_yresid * OO.qual_yresid)
OO.sumsq_signal = flex.sum(ysignal * ysignal)
OO.sumsq_residual = flex.sum(OO.qual_yresid * OO.qual_yresid)
def get_multiphoton_count(OO):
# XXX insert a test here as to whether the analysis has been carried out
# far enough along x-axis to capture all the high multi-photon signal
# if not, raise an exception
return int(round(OO.additional_photons,0))
def plot_multiphoton_fit(OO,plotter):
print("counted %.0f multiphoton photons on this pixel"%OO.additional_photons)
plotter.plot(OO.fit_xresid, 10*OO.xweight, "b.")
plotter.plot(OO.fit_xresid,OO.fit_yresid,"r.")
def plot_quality(OO,plotter):
plotter.plot(OO.qual_xresid,OO.qual_yresid/10.,"m.")
print(OO.sumsq_signal,OO.sumsq_residual, OO.quality_factor, math.sqrt(OO.sumsq_signal))
def chi_squared(OO):
return flex.sum(OO.weighted_numerator)/len(OO.weighted_numerator)
E = per_pixel_analysis()
E.quality_factor = E.sumsq_signal/E.sumsq_residual
return E
def fit_one_histogram_two_gaussians(self,pixel):
histogram = self.histograms[pixel]
fitted_gaussians = []
GAIN_TO_SIGMA = self.work_params.fudge_factor.gain_to_sigma
low_idx = self.work_params.fit_limits[0]
high_idx = self.work_params.fit_limits[1]
slot_centers = histogram.slot_centers()
free_x = slot_centers[low_idx:high_idx]
#print list(free_x)
slots = histogram.slots().as_double()
free_y = slots[low_idx:high_idx]
# zero_mean = 0. # originally intended mean=0
    maxidx = flex.max_index(free_y) # but if dark subtraction (pedestal correction) is off
zero_mean = free_x[maxidx] # use this non-zero maximum instead
zero_amplitude = flex.max(free_y)
assert 1./zero_amplitude #guard against division by zero
total_population = flex.sum(free_y)
zero_sigma = self.estimated_gain / GAIN_TO_SIGMA
one_amplitude = 0.001
helper = self.per_pixel_helper_factory(initial_estimates =
(zero_mean, 1.0, zero_sigma, one_amplitude),
GAIN_TO_SIGMA=GAIN_TO_SIGMA,
free_x = free_x,
free_y = free_y/zero_amplitude) # put y values on 0->1 scale for normal eqn solving
helper.restart()
iterations = normal_eqns_solving.levenberg_marquardt_iterations(
non_linear_ls = helper,
n_max_iterations = 7,
gradient_threshold = 1.E-3)
#print "current values after iterations", list(helper.x),
fitted_gaussians = helper.as_gaussians()
for item in fitted_gaussians: item.params = (item.params[0] * zero_amplitude,
item.params[1], item.params[2]) # convert back to full scale
return fitted_gaussians
@staticmethod
def per_pixel_helper_factory(initial_estimates,GAIN_TO_SIGMA,free_x,free_y):
from xfel.vonHamos import gaussian_fit_inheriting_from_non_linear_ls
class per_pixel_helper(gaussian_fit_inheriting_from_non_linear_ls, normal_eqns.non_linear_ls_mixin):
def __init__(pfh):
super(per_pixel_helper, pfh).__init__(n_parameters=4)
pfh.x_0 = flex.double(initial_estimates)
pfh.restart()
pfh.set_cpp_data(free_x,free_y,gain_to_sigma=GAIN_TO_SIGMA,sigmafac=SIGMAFAC)
def restart(pfh):
pfh.x = pfh.x_0.deep_copy()
pfh.old_x = None
def step_forward(pfh):
pfh.old_x = pfh.x.deep_copy()
pfh.x += pfh.step()
def step_backward(pfh):
assert pfh.old_x is not None
pfh.x, pfh.old_x = pfh.old_x, None
def parameter_vector_norm(pfh):
return pfh.x.norm()
def build_up(pfh, objective_only=False):
pfh.reset()
#rely on C++ and go directly for add_equation singular
pfh.access_cpp_build_up_directly(objective_only, current_values = pfh.x)
def as_gaussians(pfh):
return [curve_fitting.gaussian( a = pfh.x[1], b = pfh.x[0], c = pfh.x[2] ),
curve_fitting.gaussian( a = pfh.x[3], b = pfh.x[0] + pfh.x[2] * GAIN_TO_SIGMA,
c = pfh.x[2] * SIGMAFAC )]
value = per_pixel_helper()
return value
if __name__ == '__main__':
run(sys.argv[1:])
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/__init__.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
141548
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AccessUri
from ._models_py3 import ApiError
from ._models_py3 import ApiErrorBase
from ._models_py3 import CreationData
from ._models_py3 import Disk
from ._models_py3 import DiskAccess
from ._models_py3 import DiskAccessList
from ._models_py3 import DiskAccessUpdate
from ._models_py3 import DiskEncryptionSet
from ._models_py3 import DiskEncryptionSetList
from ._models_py3 import DiskEncryptionSetUpdate
from ._models_py3 import DiskList
from ._models_py3 import DiskSku
from ._models_py3 import DiskUpdate
from ._models_py3 import Encryption
from ._models_py3 import EncryptionSetIdentity
from ._models_py3 import EncryptionSettingsCollection
from ._models_py3 import EncryptionSettingsElement
from ._models_py3 import GrantAccessData
from ._models_py3 import ImageDiskReference
from ._models_py3 import InnerError
from ._models_py3 import KeyVaultAndKeyReference
from ._models_py3 import KeyVaultAndSecretReference
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import Resource
from ._models_py3 import ResourceUriList
from ._models_py3 import ShareInfoElement
from ._models_py3 import Snapshot
from ._models_py3 import SnapshotList
from ._models_py3 import SnapshotSku
from ._models_py3 import SnapshotUpdate
from ._models_py3 import SourceVault
except (SyntaxError, ImportError):
from ._models import AccessUri # type: ignore
from ._models import ApiError # type: ignore
from ._models import ApiErrorBase # type: ignore
from ._models import CreationData # type: ignore
from ._models import Disk # type: ignore
from ._models import DiskAccess # type: ignore
from ._models import DiskAccessList # type: ignore
from ._models import DiskAccessUpdate # type: ignore
from ._models import DiskEncryptionSet # type: ignore
from ._models import DiskEncryptionSetList # type: ignore
from ._models import DiskEncryptionSetUpdate # type: ignore
from ._models import DiskList # type: ignore
from ._models import DiskSku # type: ignore
from ._models import DiskUpdate # type: ignore
from ._models import Encryption # type: ignore
from ._models import EncryptionSetIdentity # type: ignore
from ._models import EncryptionSettingsCollection # type: ignore
from ._models import EncryptionSettingsElement # type: ignore
from ._models import GrantAccessData # type: ignore
from ._models import ImageDiskReference # type: ignore
from ._models import InnerError # type: ignore
from ._models import KeyVaultAndKeyReference # type: ignore
from ._models import KeyVaultAndSecretReference # type: ignore
from ._models import PrivateEndpoint # type: ignore
from ._models import PrivateEndpointConnection # type: ignore
from ._models import PrivateLinkResource # type: ignore
from ._models import PrivateLinkResourceListResult # type: ignore
from ._models import PrivateLinkServiceConnectionState # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceUriList # type: ignore
from ._models import ShareInfoElement # type: ignore
from ._models import Snapshot # type: ignore
from ._models import SnapshotList # type: ignore
from ._models import SnapshotSku # type: ignore
from ._models import SnapshotUpdate # type: ignore
from ._models import SourceVault # type: ignore
from ._compute_management_client_enums import (
AccessLevel,
DiskCreateOption,
DiskEncryptionSetIdentityType,
DiskEncryptionSetType,
DiskState,
DiskStorageAccountTypes,
EncryptionType,
HyperVGeneration,
NetworkAccessPolicy,
OperatingSystemTypes,
PrivateEndpointConnectionProvisioningState,
PrivateEndpointServiceConnectionStatus,
SnapshotStorageAccountTypes,
)
__all__ = [
'AccessUri',
'ApiError',
'ApiErrorBase',
'CreationData',
'Disk',
'DiskAccess',
'DiskAccessList',
'DiskAccessUpdate',
'DiskEncryptionSet',
'DiskEncryptionSetList',
'DiskEncryptionSetUpdate',
'DiskList',
'DiskSku',
'DiskUpdate',
'Encryption',
'EncryptionSetIdentity',
'EncryptionSettingsCollection',
'EncryptionSettingsElement',
'GrantAccessData',
'ImageDiskReference',
'InnerError',
'KeyVaultAndKeyReference',
'KeyVaultAndSecretReference',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateLinkResource',
'PrivateLinkResourceListResult',
'PrivateLinkServiceConnectionState',
'Resource',
'ResourceUriList',
'ShareInfoElement',
'Snapshot',
'SnapshotList',
'SnapshotSku',
'SnapshotUpdate',
'SourceVault',
'AccessLevel',
'DiskCreateOption',
'DiskEncryptionSetIdentityType',
'DiskEncryptionSetType',
'DiskState',
'DiskStorageAccountTypes',
'EncryptionType',
'HyperVGeneration',
'NetworkAccessPolicy',
'OperatingSystemTypes',
'PrivateEndpointConnectionProvisioningState',
'PrivateEndpointServiceConnectionStatus',
'SnapshotStorageAccountTypes',
]
|
ml_editor/model_evaluation.py
|
VestiDev/ml-powered-applications-2020-book
| 542 |
141579
|
<filename>ml_editor/model_evaluation.py
from sklearn.calibration import calibration_curve
from sklearn.metrics import (
confusion_matrix,
roc_curve,
auc,
brier_score_loss,
accuracy_score,
f1_score,
precision_score,
recall_score,
)
import matplotlib.pyplot as plt
import numpy as np
import itertools
def get_confusion_matrix_plot(
predicted_y,
true_y,
classes=None,
normalize=False,
title="Confusion matrix",
cmap=plt.get_cmap("binary"),
figsize=(10, 10),
):
"""
Inspired by sklearn example
https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
:param figsize: size of the output figure
:param predicted_y: model's predicted values
:param true_y: true value of the labels
:param classes: names of both classes
:param normalize: should we normalize the plot
:param title: plot title
:param cmap: colormap to use
:return: plot for the confusion matrix
"""
if classes is None:
classes = ["Low quality", "High quality"]
cm = confusion_matrix(true_y, predicted_y)
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=figsize)
ax = plt.gca()
im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
title_obj = plt.title(title, fontsize=30)
title_obj.set_position([0.5, 1.15])
plt.colorbar(im)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, fontsize=15)
plt.yticks(tick_marks, classes, fontsize=15)
fmt = ".2f" if normalize else "d"
thresh = (cm.max() - cm.min()) / 2.0 + cm.min()
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
fontsize=40,
)
plt.tight_layout()
plt.ylabel("True label", fontsize=20)
plt.xlabel("Predicted label", fontsize=20)
def get_roc_plot(
predicted_proba_y, true_y, tpr_bar=-1, fpr_bar=-1, figsize=(10, 10)
):
"""
Inspired by sklearn example
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html
:param fpr_bar: A threshold false positive rate to draw
:param tpr_bar: A threshold true positive rate to draw
:param figsize: size of the output figure
:param predicted_proba_y: the predicted probabilities of our model for each example
:param true_y: the true value of the label
:return: ROC plot
"""
fpr, tpr, thresholds = roc_curve(true_y, predicted_proba_y)
roc_auc = auc(fpr, tpr)
plt.figure(figsize=figsize)
plt.plot(
fpr,
tpr,
lw=1,
alpha=1,
color="black",
label="ROC curve (AUC = %0.2f)" % roc_auc,
)
plt.plot(
[0, 1],
[0, 1],
linestyle="--",
lw=2,
color="grey",
label="Chance",
alpha=1,
)
# Cheating on position to make plot more readable
plt.plot(
[0.01, 0.01, 1],
[0.01, 0.99, 0.99],
linestyle=":",
lw=2,
color="green",
label="Perfect model",
alpha=1,
)
if tpr_bar != -1:
plt.plot(
[0, 1],
[tpr_bar, tpr_bar],
linestyle="-",
lw=2,
color="red",
label="TPR requirement",
alpha=1,
)
plt.fill_between([0, 1], [tpr_bar, tpr_bar], [1, 1], alpha=0, hatch="\\")
if fpr_bar != -1:
plt.plot(
[fpr_bar, fpr_bar],
[0, 1],
linestyle="-",
lw=2,
color="red",
label="FPR requirement",
alpha=1,
)
plt.fill_between([fpr_bar, 1], [1, 1], alpha=0, hatch="\\")
plt.legend(loc="lower right")
plt.ylabel("True positive rate", fontsize=20)
plt.xlabel("False positive rate", fontsize=20)
plt.xlim(0, 1)
plt.ylim(0, 1)
def get_calibration_plot(predicted_proba_y, true_y, figsize=(10, 10)):
"""
Inspired by sklearn example
https://scikit-learn.org/stable/auto_examples/calibration/plot_calibration_curve.html
:param figsize: size of the output figure
:param predicted_proba_y: the predicted probabilities of our model for each example
:param true_y: the true value of the label
:return: calibration plot
"""
plt.figure(figsize=figsize)
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
clf_score = brier_score_loss(
true_y, predicted_proba_y, pos_label=true_y.max()
)
print("\tBrier: %1.3f" % clf_score)
fraction_of_positives, mean_predicted_value = calibration_curve(
true_y, predicted_proba_y, n_bins=10
)
ax1.plot(
mean_predicted_value,
fraction_of_positives,
"s-",
color="black",
label="%1.3f Brier score (0 is best, 1 is worst)" % clf_score,
)
ax2.hist(
predicted_proba_y,
range=(0, 1),
bins=10,
histtype="step",
lw=2,
color="black",
)
ax1.set_ylabel("Fraction of positives")
ax1.set_xlim([0, 1])
ax1.set_ylim([0, 1])
ax1.legend(loc="lower right")
ax1.set_title("Calibration plot")
ax2.set_title("Probability distribution")
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
def get_metrics(predicted_y, true_y):
"""
Get standard metrics for binary classification
:param predicted_y: model's predicted values
:param true_y: true value of the labels
:return: accuracy, precision, recall, f1
"""
# true positives / (true positives+false positives)
precision = precision_score(
true_y, predicted_y, pos_label=None, average="weighted"
)
# true positives / (true positives + false negatives)
recall = recall_score(
true_y, predicted_y, pos_label=None, average="weighted"
)
# harmonic mean of precision and recall
f1 = f1_score(true_y, predicted_y, pos_label=None, average="weighted")
# true positives + true negatives/ total
accuracy = accuracy_score(true_y, predicted_y)
return accuracy, precision, recall, f1
def get_feature_importance(clf, feature_names):
"""
Get a list of feature importances for a classifier
:param clf: a scikit-learn classifier
:param feature_names: a list of the names of features
in the order they were given to the classifier
:return: sorted list of tuples of the form (feat_name, score)
"""
importances = clf.feature_importances_
indices_sorted_by_importance = np.argsort(importances)[::-1]
return list(
zip(
feature_names[indices_sorted_by_importance],
importances[indices_sorted_by_importance],
)
)
def get_top_k(df, proba_col, true_label_col, k=5, decision_threshold=0.5):
"""
For binary classification problems.
Returns the k most correct and the k most incorrect examples for each class,
as well as the k examples the classifier is most unsure about.
:param df: DataFrame containing predictions, and true labels
:param proba_col: column name of predicted probabilities
:param true_label_col: column name of true labels
:param k: number of examples to show for each category
:param decision_threshold: classifier decision boundary to classify as positive
:return: correct_pos, correct_neg, incorrect_pos, incorrect_neg, unsure
"""
# Get correct and incorrect predictions
correct = df[
(df[proba_col] > decision_threshold) == df[true_label_col]
].copy()
incorrect = df[
(df[proba_col] > decision_threshold) != df[true_label_col]
].copy()
top_correct_positive = correct[correct[true_label_col]].nlargest(
k, proba_col
)
top_correct_negative = correct[~correct[true_label_col]].nsmallest(
k, proba_col
)
top_incorrect_positive = incorrect[incorrect[true_label_col]].nsmallest(
k, proba_col
)
top_incorrect_negative = incorrect[~incorrect[true_label_col]].nlargest(
k, proba_col
)
# Get closest examples to decision threshold
most_uncertain = df.iloc[
(df[proba_col] - decision_threshold).abs().argsort()[:k]
]
return (
top_correct_positive,
top_correct_negative,
top_incorrect_positive,
top_incorrect_negative,
most_uncertain,
)
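# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how get_metrics and get_top_k might be called on a tiny DataFrame. The
# column names "predicted_proba" and "true_label" and all values below are
# invented for this example only.
if __name__ == "__main__":
    import pandas as pd

    demo_df = pd.DataFrame(
        {
            "predicted_proba": [0.9, 0.2, 0.65, 0.4, 0.05, 0.55],
            "true_label": [True, False, True, True, False, False],
        }
    )
    # Threshold the probabilities to obtain hard predictions.
    demo_predictions = demo_df["predicted_proba"] > 0.5
    accuracy, precision, recall, f1 = get_metrics(
        demo_predictions, demo_df["true_label"]
    )
    print(
        "accuracy=%.2f precision=%.2f recall=%.2f f1=%.2f"
        % (accuracy, precision, recall, f1)
    )
    # Each returned DataFrame holds at most k rows of demo_df.
    for name, subset in zip(
        ("correct_pos", "correct_neg", "incorrect_pos", "incorrect_neg", "unsure"),
        get_top_k(demo_df, "predicted_proba", "true_label", k=2),
    ):
        print(name, list(subset.index))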
|
tests/test_options.py
|
xiaowuhu/sklearn-onnx
| 323 |
141588
|
<filename>tests/test_options.py
# SPDX-License-Identifier: Apache-2.0
"""
Tests topology.
"""
import unittest
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn import datasets
from skl2onnx import to_onnx, update_registered_converter
from skl2onnx.algebra.onnx_ops import OnnxIdentity, OnnxAdd
from test_utils import TARGET_OPSET
class DummyTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
TransformerMixin.__init__(self)
BaseEstimator.__init__(self)
def fit(self, X, y, sample_weight=None):
return self
def transform(self, X):
return X
def dummy_shape_calculator(operator):
op_input = operator.inputs[0]
operator.outputs[0].type.shape = op_input.type.shape
def dummy_converter(scope, operator, container):
X = operator.inputs[0]
out = operator.outputs
opv = container.target_opset
options = container.get_options(operator.raw_operator)
if len(options) == 0:
cst = numpy.array([57777], dtype=numpy.float32)
elif len(options) == 1:
opts = list(options.items())
if opts[0][0] == 'opt1':
if opts[0][1] is None:
cst = numpy.array([57789], dtype=numpy.float32)
elif opts[0][1]:
cst = numpy.array([57778], dtype=numpy.float32)
elif not opts[0][1]:
cst = numpy.array([57779], dtype=numpy.float32)
else:
raise AssertionError("Issue with %r." % options)
elif opts[0][0] == 'opt3':
if opts[0][1] is None:
cst = numpy.array([51789], dtype=numpy.float32)
elif opts[0][1] == 'r':
cst = numpy.array([56779], dtype=numpy.float32)
elif opts[0][1] == 't':
cst = numpy.array([58779], dtype=numpy.float32)
else:
raise AssertionError("Issue with %r." % options)
elif opts[0][0] == 'opt2':
if opts[0][1] is None:
cst = numpy.array([44444], dtype=numpy.float32)
elif isinstance(opts[0][1], int):
cst = numpy.array([opts[0][1]], dtype=numpy.float32)
else:
raise AssertionError("Issue with %r." % options)
else:
raise NotImplementedError()
else:
raise NotImplementedError()
id1 = OnnxIdentity(X, op_version=opv)
op = OnnxAdd(id1, cst, op_version=opv)
id2 = OnnxIdentity(op, output_names=out[:1],
op_version=opv)
id2.add_to(scope, container)
class TestOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
update_registered_converter(
DummyTransformer, "IdentityTransformer",
dummy_shape_calculator, dummy_converter,
options={'opt1': [False, True], 'opt2': None,
'opt3': ('r', 't'), 'opt4': -1})
def check_in(self, value, onx):
if str(value) not in str(onx):
raise AssertionError(
"Unable to find %r in\n%s" % (str(value), str(onx)))
def test_no_options(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET)
self.check_in('57777', model_onnx)
def test_options_list_true(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': True})
self.check_in('57778', model_onnx)
def test_options_list_false(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': False})
self.check_in('57779', model_onnx)
def test_options_list_outside_none(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': None})
self.check_in('57789', model_onnx)
def test_options_list_outside(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
with self.assertRaises(ValueError):
# value not allowed
to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': 'OUT'})
def test_options_integer(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
with self.assertRaises(TypeError):
# integer not allowed
to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt4': 44444})
def test_options_tuple1(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': 't'})
self.check_in('58779', model_onnx)
def test_options_tuple2(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': 'r'})
self.check_in('56779', model_onnx)
def test_options_tuple_none(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': None})
self.check_in('51789', model_onnx)
def test_options_tuple_out(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
with self.assertRaises(ValueError):
# value not allowed
to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': 'G'})
def test_options_none(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt2': None})
self.check_in('44444', model_onnx)
def test_options_num(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt2': 33333})
self.check_in('33333', model_onnx)
if __name__ == "__main__":
unittest.main()
|
lib/luxi.py
|
modulus-sa/ganeti
| 396 |
141601
|
<reponame>modulus-sa/ganeti
#
#
# Copyright (C) 2006, 2007, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for the LUXI protocol
This module implements the local unix socket protocol. You only need
this module and the opcodes module in the client program in order to
communicate with the master.
The module is also used by the master daemon.
"""
from ganeti import constants
from ganeti import pathutils
from ganeti import objects
import ganeti.rpc.client as cl
from ganeti.rpc.errors import RequestError
from ganeti.rpc.transport import Transport
__all__ = [
# classes:
"Client"
]
REQ_SUBMIT_JOB = constants.LUXI_REQ_SUBMIT_JOB
REQ_SUBMIT_JOB_TO_DRAINED_QUEUE = constants.LUXI_REQ_SUBMIT_JOB_TO_DRAINED_QUEUE
REQ_SUBMIT_MANY_JOBS = constants.LUXI_REQ_SUBMIT_MANY_JOBS
REQ_PICKUP_JOB = constants.LUXI_REQ_PICKUP_JOB
REQ_WAIT_FOR_JOB_CHANGE = constants.LUXI_REQ_WAIT_FOR_JOB_CHANGE
REQ_CANCEL_JOB = constants.LUXI_REQ_CANCEL_JOB
REQ_ARCHIVE_JOB = constants.LUXI_REQ_ARCHIVE_JOB
REQ_CHANGE_JOB_PRIORITY = constants.LUXI_REQ_CHANGE_JOB_PRIORITY
REQ_AUTO_ARCHIVE_JOBS = constants.LUXI_REQ_AUTO_ARCHIVE_JOBS
REQ_QUERY = constants.LUXI_REQ_QUERY
REQ_QUERY_FIELDS = constants.LUXI_REQ_QUERY_FIELDS
REQ_QUERY_JOBS = constants.LUXI_REQ_QUERY_JOBS
REQ_QUERY_FILTERS = constants.LUXI_REQ_QUERY_FILTERS
REQ_REPLACE_FILTER = constants.LUXI_REQ_REPLACE_FILTER
REQ_DELETE_FILTER = constants.LUXI_REQ_DELETE_FILTER
REQ_QUERY_INSTANCES = constants.LUXI_REQ_QUERY_INSTANCES
REQ_QUERY_NODES = constants.LUXI_REQ_QUERY_NODES
REQ_QUERY_GROUPS = constants.LUXI_REQ_QUERY_GROUPS
REQ_QUERY_NETWORKS = constants.LUXI_REQ_QUERY_NETWORKS
REQ_QUERY_EXPORTS = constants.LUXI_REQ_QUERY_EXPORTS
REQ_QUERY_CONFIG_VALUES = constants.LUXI_REQ_QUERY_CONFIG_VALUES
REQ_QUERY_CLUSTER_INFO = constants.LUXI_REQ_QUERY_CLUSTER_INFO
REQ_QUERY_TAGS = constants.LUXI_REQ_QUERY_TAGS
REQ_SET_DRAIN_FLAG = constants.LUXI_REQ_SET_DRAIN_FLAG
REQ_SET_WATCHER_PAUSE = constants.LUXI_REQ_SET_WATCHER_PAUSE
REQ_ALL = constants.LUXI_REQ_ALL
DEF_RWTO = constants.LUXI_DEF_RWTO
WFJC_TIMEOUT = constants.LUXI_WFJC_TIMEOUT
class Client(cl.AbstractClient):
"""High-level client implementation.
This uses a backing Transport-like class on top of which it
implements data serialization/deserialization.
"""
def __init__(self, address=None, timeouts=None, transport=Transport):
"""Constructor for the Client class.
Arguments are the same as for L{AbstractClient}.
"""
super(Client, self).__init__(timeouts, transport)
# Override the version of the protocol:
self.version = constants.LUXI_VERSION
# Store the socket address
if address is None:
address = pathutils.QUERY_SOCKET
self.address = address
self._InitTransport()
def _GetAddress(self):
return self.address
def SetQueueDrainFlag(self, drain_flag):
return self.CallMethod(REQ_SET_DRAIN_FLAG, (drain_flag, ))
def SetWatcherPause(self, until):
return self.CallMethod(REQ_SET_WATCHER_PAUSE, (until, ))
def PickupJob(self, job):
return self.CallMethod(REQ_PICKUP_JOB, (job,))
def SubmitJob(self, ops):
ops_state = [op.__getstate__()
if not isinstance(op, objects.ConfigObject)
else op.ToDict(_with_private=True)
for op in ops]
return self.CallMethod(REQ_SUBMIT_JOB, (ops_state, ))
def SubmitJobToDrainedQueue(self, ops):
ops_state = [op.__getstate__() for op in ops]
return self.CallMethod(REQ_SUBMIT_JOB_TO_DRAINED_QUEUE, (ops_state, ))
def SubmitManyJobs(self, jobs):
jobs_state = []
for ops in jobs:
jobs_state.append([op.__getstate__() for op in ops])
return self.CallMethod(REQ_SUBMIT_MANY_JOBS, (jobs_state, ))
@staticmethod
def _PrepareJobId(request_name, job_id):
try:
return int(job_id)
except ValueError:
raise RequestError("Invalid parameter passed to %s as job id: "
" expected integer, got value %s" %
(request_name, job_id))
def CancelJob(self, job_id, kill=False):
job_id = Client._PrepareJobId(REQ_CANCEL_JOB, job_id)
return self.CallMethod(REQ_CANCEL_JOB, (job_id, kill))
def ArchiveJob(self, job_id):
job_id = Client._PrepareJobId(REQ_ARCHIVE_JOB, job_id)
return self.CallMethod(REQ_ARCHIVE_JOB, (job_id, ))
def ChangeJobPriority(self, job_id, priority):
job_id = Client._PrepareJobId(REQ_CHANGE_JOB_PRIORITY, job_id)
return self.CallMethod(REQ_CHANGE_JOB_PRIORITY, (job_id, priority))
def AutoArchiveJobs(self, age):
timeout = (DEF_RWTO - 1) // 2
return self.CallMethod(REQ_AUTO_ARCHIVE_JOBS, (age, timeout))
def WaitForJobChangeOnce(self, job_id, fields,
prev_job_info, prev_log_serial,
timeout=WFJC_TIMEOUT):
"""Waits for changes on a job.
@param job_id: Job ID
@type fields: list
@param fields: List of field names to be observed
@type prev_job_info: None or list
@param prev_job_info: Previously received job information
@type prev_log_serial: None or int/long
@param prev_log_serial: Highest log serial number previously received
@type timeout: int/float
@param timeout: Timeout in seconds (values larger than L{WFJC_TIMEOUT} will
be capped to that value)
"""
assert timeout >= 0, "Timeout can not be negative"
return self.CallMethod(REQ_WAIT_FOR_JOB_CHANGE,
(job_id, fields, prev_job_info,
prev_log_serial,
min(WFJC_TIMEOUT, timeout)))
def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
job_id = Client._PrepareJobId(REQ_WAIT_FOR_JOB_CHANGE, job_id)
while True:
result = self.WaitForJobChangeOnce(job_id, fields,
prev_job_info, prev_log_serial)
if result != constants.JOB_NOTCHANGED:
break
return result
def Query(self, what, fields, qfilter):
"""Query for resources/items.
@param what: One of L{constants.QR_VIA_LUXI}
@type fields: List of strings
@param fields: List of requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: L{objects.QueryResponse}
"""
result = self.CallMethod(REQ_QUERY, (what, fields, qfilter))
return objects.QueryResponse.FromDict(result)
def QueryFields(self, what, fields):
"""Query for available fields.
@param what: One of L{constants.QR_VIA_LUXI}
@type fields: None or list of strings
@param fields: List of requested fields
@rtype: L{objects.QueryFieldsResponse}
"""
result = self.CallMethod(REQ_QUERY_FIELDS, (what, fields))
return objects.QueryFieldsResponse.FromDict(result)
def QueryJobs(self, job_ids, fields):
return self.CallMethod(REQ_QUERY_JOBS, (job_ids, fields))
def QueryFilters(self, uuids, fields):
return self.CallMethod(REQ_QUERY_FILTERS, (uuids, fields))
def ReplaceFilter(self, uuid, priority, predicates, action, reason):
return self.CallMethod(REQ_REPLACE_FILTER,
(uuid, priority, predicates, action, reason))
def DeleteFilter(self, uuid):
return self.CallMethod(REQ_DELETE_FILTER, (uuid, ))
def QueryInstances(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_INSTANCES, (names, fields, use_locking))
def QueryNodes(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_NODES, (names, fields, use_locking))
def QueryGroups(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_GROUPS, (names, fields, use_locking))
def QueryNetworks(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_NETWORKS, (names, fields, use_locking))
def QueryExports(self, nodes, use_locking):
return self.CallMethod(REQ_QUERY_EXPORTS, (nodes, use_locking))
def QueryClusterInfo(self):
return self.CallMethod(REQ_QUERY_CLUSTER_INFO, ())
def QueryConfigValues(self, fields):
return self.CallMethod(REQ_QUERY_CONFIG_VALUES, (fields, ))
def QueryTags(self, kind, name):
return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
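# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes an importable ganeti package and a running master daemon exposing the
# query socket; the arguments and field names below are examples only.
def _example_usage():  # pragma: no cover - illustration only
    client = Client()  # connects to pathutils.QUERY_SOCKET by default
    print(client.QueryClusterInfo())
    print(client.QueryJobs(None, ["id", "status"]))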
|
src/obfuscapk/obfuscators/advanced_reflection/advanced_reflection.py
|
Dado1513/Obfuscapk
| 688 |
141616
|
#!/usr/bin/env python3
import logging
import os
import re
from typing import List, Set
from obfuscapk import obfuscator_category
from obfuscapk import util
from obfuscapk.obfuscation import Obfuscation
class AdvancedReflection(obfuscator_category.ICodeObfuscator):
def __init__(self):
self.logger = logging.getLogger(
"{0}.{1}".format(__name__, self.__class__.__name__)
)
super().__init__()
self.methods_with_reflection: int = 0
# Keep track of the length of the instructions added by the advanced reflection
# obfuscator, since there is a limit on the maximum number of instructions inside
# a try/catch block. Not all instructions have the same length.
self.obfuscator_instructions_length: int = 0
self.obfuscator_instructions_limit: int = 60000
self.primitive_types: Set[str] = {"I", "Z", "B", "S", "J", "F", "D", "C"}
self.type_dict = {
"I": "Ljava/lang/Integer;",
"Z": "Ljava/lang/Boolean;",
"B": "Ljava/lang/Byte;",
"S": "Ljava/lang/Short;",
"J": "Ljava/lang/Long;",
"F": "Ljava/lang/Float;",
"D": "Ljava/lang/Double;",
"C": "Ljava/lang/Character;",
}
self.sget_dict = {
"I": "Ljava/lang/Integer;->TYPE:Ljava/lang/Class;",
"Z": "Ljava/lang/Boolean;->TYPE:Ljava/lang/Class;",
"B": "Ljava/lang/Byte;->TYPE:Ljava/lang/Class;",
"S": "Ljava/lang/Short;->TYPE:Ljava/lang/Class;",
"J": "Ljava/lang/Long;->TYPE:Ljava/lang/Class;",
"F": "Ljava/lang/Float;->TYPE:Ljava/lang/Class;",
"D": "Ljava/lang/Double;->TYPE:Ljava/lang/Class;",
"C": "Ljava/lang/Character;->TYPE:Ljava/lang/Class;",
}
self.cast_dict = {
"I": "Ljava/lang/Integer;->valueOf(I)Ljava/lang/Integer;",
"Z": "Ljava/lang/Boolean;->valueOf(Z)Ljava/lang/Boolean;",
"B": "Ljava/lang/Byte;->valueOf(B)Ljava/lang/Byte;",
"S": "Ljava/lang/Short;->valueOf(S)Ljava/lang/Short;",
"J": "Ljava/lang/Long;->valueOf(J)Ljava/lang/Long;",
"F": "Ljava/lang/Float;->valueOf(F)Ljava/lang/Float;",
"D": "Ljava/lang/Double;->valueOf(D)Ljava/lang/Double;",
"C": "Ljava/lang/Character;->valueOf(C)Ljava/lang/Character;",
}
self.reverse_cast_dict = {
"I": "Ljava/lang/Integer;->intValue()I",
"Z": "Ljava/lang/Boolean;->booleanValue()Z",
"B": "Ljava/lang/Byte;->byteValue()B",
"S": "Ljava/lang/Short;->shortValue()S",
"J": "Ljava/lang/Long;->longValue()J",
"F": "Ljava/lang/Float;->floatValue()F",
"D": "Ljava/lang/Double;->doubleValue()D",
"C": "Ljava/lang/Character;->charValue()C",
}
def split_method_params(self, param_string: str) -> List[str]:
params: List[str] = []
possible_classes = param_string.split(";")
for possible_class in possible_classes:
# Make sure the parameter list is not empty.
if possible_class:
if possible_class.startswith("L"):
# Class.
params.append("{0};".format(possible_class))
elif possible_class.startswith("["):
# Array + other optional parameters (e.g. [ILjava/lang/Object).
for string_position in range(1, len(possible_class)):
if possible_class[string_position] == "[":
# Multi-dimensional array, proceed with the next char.
continue
elif possible_class[string_position] == "L":
# Class array, no need to proceed with the next char.
params.append("{0};".format(possible_class))
break
else:
# Primitive type array, add it to the list and proceed with
# the rest of the string
params.append(possible_class[: string_position + 1])
params.extend(
self.split_method_params(
possible_class[string_position + 1 :]
)
)
break
elif possible_class[0] in self.primitive_types:
# Primitive type + other optional parameters
# (e.g. ILjava/lang/Object).
params.append(possible_class[0])
params.extend(self.split_method_params(possible_class[1:]))
return params
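    # Hedged worked example (added for illustration; not part of the original source):
    # for the smali parameter string "I[JLjava/lang/String;" this method returns
    # ["I", "[J", "Ljava/lang/String;"], and count_needed_registers() then reports
    # 3 registers, since an array such as "[J" is a reference (one register) while
    # only a bare "J" or "D" needs two.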
def count_needed_registers(self, params: List[str]) -> int:
needed_registers: int = 0
for param in params:
# Long and double variables need 2 registers.
if param == "J" or param == "D":
needed_registers += 2
else:
needed_registers += 1
return needed_registers
def add_smali_reflection_code(
self, class_name: str, method_name: str, param_string: str
) -> str:
params = self.split_method_params(param_string)
smali_code = "\n\tconst/4 v1, {param_num:#x}\n\n".format(param_num=len(params))
self.obfuscator_instructions_length += 1
if len(params) > 0:
smali_code += "\tnew-array v1, v1, [Ljava/lang/Class;\n\n"
self.obfuscator_instructions_length += 2
for param_index, param in enumerate(params):
smali_code += "\tconst/4 v2, {param_num:#x}\n\n".format(
param_num=param_index
)
self.obfuscator_instructions_length += 1
class_param = self.sget_dict.get(param, None)
if class_param:
smali_code += "\tsget-object v3, {param}\n\n".format(param=class_param)
self.obfuscator_instructions_length += 2
else:
smali_code += "\tconst-class v3, {param}\n\n".format(param=param)
self.obfuscator_instructions_length += 2
smali_code += "\taput-object v3, v1, v2\n\n"
self.obfuscator_instructions_length += 2
smali_code += (
"\tconst-class v2, {class_name}\n\n"
'\tconst-string v3, "{method_name}"\n\n'.format(
class_name=class_name, method_name=method_name
)
)
self.obfuscator_instructions_length += 4
smali_code += (
"\tinvoke-virtual {v2, v3, v1}, Ljava/lang/Class;->getDeclaredMethod("
"Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;\n\n"
)
self.obfuscator_instructions_length += 3
smali_code += (
"\tmove-result-object v1\n\n"
"\tsget-object v2, Lcom/apireflectionmanager/AdvancedApiReflection;->"
"obfuscatedMethods:Ljava/util/List;\n\n"
)
self.obfuscator_instructions_length += 3
smali_code += (
"\tinvoke-interface {v2, v1}, Ljava/util/List;->add(Ljava/lang/Object;)Z\n"
)
self.obfuscator_instructions_length += 3
return smali_code
def create_reflection_method(
self,
num_of_methods: int,
local_count: int,
is_virtual_method: bool,
invoke_registers: str,
invoke_parameters: str,
):
# Split method passed registers (if the method has no registers there is
# an empty line that has to be removed, that's why strip() is used).
invoke_registers = [
register.strip()
for register in invoke_registers.split(", ")
if register.strip()
]
params = self.split_method_params(invoke_parameters)
param_to_register: List[
List[str]
] = [] # list[i][0] = i-th param, list[i][1] = [i-th param register(s)]
if is_virtual_method:
# If this is a virtual method, the first register is the object instance
# and not a parameter.
register_index = 1
for param in params:
# Long and double variables need 2 registers.
if param == "J" or param == "D":
param_to_register.append(
[param, invoke_registers[register_index : register_index + 2]]
)
register_index += 2
else:
param_to_register.append(
[param, [invoke_registers[register_index]]]
)
register_index += 1
else:
# This is a static method, so there is no object instance register:
# the first register already holds the first method parameter.
register_index = 0
for param in params:
# Long and double variables need 2 registers.
if param == "J" or param == "D":
param_to_register.append(
[param, invoke_registers[register_index : register_index + 2]]
)
register_index += 2
else:
param_to_register.append(
[param, [invoke_registers[register_index]]]
)
register_index += 1
smali_code = "\tconst/4 #reg1#, {register_num:#x}\n\n".format(
register_num=len(params)
)
if len(params) > 0:
smali_code += "\tnew-array #reg1#, #reg1#, [Ljava/lang/Object;\n\n"
for param_index, param_and_register in enumerate(param_to_register):
# param_and_register[0] = parameter type
# param_and_register[1] = [register(s) holding the passed parameter(s)]
cast_primitive_to_class = self.cast_dict.get(
param_and_register[0], None
)
if cast_primitive_to_class:
if len(param_and_register[1]) > 1:
# 2 register parameter.
smali_code += (
"\tinvoke-static {{{register_pair}}}, {cast}\n\n"
"\tmove-result-object #reg2#\n\n".format(
register_pair=", ".join(param_and_register[1]),
cast=cast_primitive_to_class,
)
)
else:
smali_code += (
"\tinvoke-static {{{register}}}, {cast}\n\n"
"\tmove-result-object #reg2#\n\n".format(
register=param_and_register[1][0],
cast=cast_primitive_to_class,
)
)
smali_code += (
"\tconst/4 #reg4#, {param_index:#x}\n\n"
"\taput-object #reg2#, #reg1#, #reg4#\n\n".format(
param_index=param_index
)
)
else:
smali_code += (
"\tconst/4 #reg3#, {param_index:#x}\n\n"
"\taput-object {register}, #reg1#, #reg3#\n\n".format(
param_index=param_index, register=param_and_register[1][0]
)
)
smali_code += "\tconst/16 #reg3#, {method_num:#x}\n\n".format(
method_num=num_of_methods
)
if is_virtual_method:
smali_code += (
"\tinvoke-static {{#reg3#, {obj_instance}, #reg1#}}, "
"Lcom/apireflectionmanager/AdvancedApiReflection;->obfuscate("
"ILjava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;\n".format(
obj_instance=invoke_registers[0]
)
)
else:
smali_code += "\tconst/4 #reg4#, 0x0\n\n"
smali_code += (
"\tinvoke-static {#reg3#, #reg4#, #reg1#}, "
"Lcom/apireflectionmanager/AdvancedApiReflection;->"
"obfuscate(ILjava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;\n"
)
for index in range(0, 4):
smali_code = smali_code.replace(
"#reg{0}#".format(index + 1), "v{0}".format(local_count + index)
)
return smali_code
def obfuscate(self, obfuscation_info: Obfuscation):
self.logger.info('Running "{0}" obfuscator'.format(self.__class__.__name__))
try:
dangerous_api: Set[str] = set(util.get_dangerous_api())
obfuscator_smali_code: str = ""
move_result_pattern = re.compile(
r"\s+move-result.*?\s(?P<register>[vp0-9]+)"
)
for smali_file in util.show_list_progress(
obfuscation_info.get_smali_files(),
interactive=obfuscation_info.interactive,
description="Obfuscating dangerous APIs using reflection",
):
self.logger.debug(
'Obfuscating dangerous APIs using reflection in file "{0}"'.format(
smali_file
)
)
# There is no space for further reflection instructions.
if (
self.obfuscator_instructions_length
>= self.obfuscator_instructions_limit
):
break
with open(smali_file, "r", encoding="utf-8") as current_file:
lines = current_file.readlines()
# Line numbers where a method is declared.
method_index: List[int] = []
# For each method in method_index, True if there are enough registers
# to perform some operations by using reflection, False otherwise.
method_is_reflectable: List[bool] = []
# The number of local registers of each method in method_index.
method_local_count: List[int] = []
# Find the method declarations in this smali file.
for line_number, line in enumerate(lines):
method_match = util.method_pattern.match(line)
if method_match:
method_index.append(line_number)
param_count = self.count_needed_registers(
self.split_method_params(method_match.group("method_param"))
)
# Save the number of local registers of this method.
local_count = 16
local_match = util.locals_pattern.match(lines[line_number + 1])
if local_match:
local_count = int(local_match.group("local_count"))
method_local_count.append(local_count)
else:
# For some reason the locals declaration was not found where
# it should be, so assume the local registers are all used.
method_local_count.append(local_count)
# If there are enough registers available we can perform some
# reflection operations.
if param_count + local_count <= 11:
method_is_reflectable.append(True)
else:
method_is_reflectable.append(False)
# Look for method invocations of dangerous APIs inside the methods
# declared in this smali file and change normal invocations with
# invocations through reflection.
for method_number, index in enumerate(method_index):
# If there are enough registers for reflection operations, look for
# method invocations inside each method's body.
if method_is_reflectable[method_number]:
current_line_number = index
while not lines[current_line_number].startswith(".end method"):
# There is no space for further reflection instructions.
if (
self.obfuscator_instructions_length
>= self.obfuscator_instructions_limit
):
break
current_line_number += 1
invoke_match = util.invoke_pattern.match(
lines[current_line_number]
)
if invoke_match:
method = (
"{class_name}->{method_name}"
"({method_param}){method_return}".format(
class_name=invoke_match.group("invoke_object"),
method_name=invoke_match.group("invoke_method"),
method_param=invoke_match.group("invoke_param"),
method_return=invoke_match.group(
"invoke_return"
),
)
)
# Use reflection only if this method belongs to
# dangerous APIs.
if method not in dangerous_api:
continue
if (
invoke_match.group("invoke_type")
== "invoke-virtual"
):
tmp_is_virtual = True
elif (
invoke_match.group("invoke_type") == "invoke-static"
):
tmp_is_virtual = False
else:
continue
tmp_register = invoke_match.group("invoke_pass")
tmp_class_name = invoke_match.group("invoke_object")
tmp_method = invoke_match.group("invoke_method")
tmp_param = invoke_match.group("invoke_param")
tmp_return_type = invoke_match.group("invoke_return")
# Check if the method invocation result is used in the
# following lines.
for move_result_index in range(
current_line_number + 1,
min(current_line_number + 10, len(lines) - 1),
):
if "invoke-" in lines[move_result_index]:
# New method invocation, the previous method
# result is not used.
break
move_result_match = move_result_pattern.match(
lines[move_result_index]
)
if move_result_match:
tmp_result_register = move_result_match.group(
"register"
)
# Fix the move-result instruction after the
# method invocation.
new_move_result = ""
if tmp_return_type in self.primitive_types:
new_move_result += (
"\tmove-result-object "
"{result_register}\n\n"
"\tcheck-cast {result_register}, "
"{result_class}\n\n".format(
result_register=tmp_result_register,
result_class=self.type_dict[
tmp_return_type
],
)
)
new_move_result += "\tinvoke-virtual " "{{{result_register}}}, {cast}\n\n".format(
result_register=tmp_result_register,
cast=self.reverse_cast_dict[
tmp_return_type
],
)
if (
tmp_return_type == "J"
or tmp_return_type == "D"
):
new_move_result += (
"\tmove-result-wide "
"{result_register}\n".format(
result_register=tmp_result_register
)
)
else:
new_move_result += (
"\tmove-result "
"{result_register}\n".format(
result_register=tmp_result_register
)
)
else:
new_move_result += (
"\tmove-result-object "
"{result_register}\n\n"
"\tcheck-cast {result_register}, "
"{return_type}\n".format(
result_register=tmp_result_register,
return_type=tmp_return_type,
)
)
lines[move_result_index] = new_move_result
# Add the original method to the list of methods using
# reflection.
obfuscator_smali_code += self.add_smali_reflection_code(
tmp_class_name, tmp_method, tmp_param
)
# Change the original code with code using reflection.
lines[
current_line_number
] = self.create_reflection_method(
self.methods_with_reflection,
method_local_count[method_number],
tmp_is_virtual,
tmp_register,
tmp_param,
)
self.methods_with_reflection += 1
# Add the registers needed for performing reflection.
lines[index + 1] = "\t.locals {0}\n".format(
method_local_count[method_number] + 4
)
with open(smali_file, "w", encoding="utf-8") as current_file:
current_file.writelines(lines)
# Add to the app the code needed for the reflection obfuscator. The code
# can be put in any smali directory, since it will be moved to the correct
# directory when rebuilding the application.
destination_dir = os.path.dirname(obfuscation_info.get_smali_files()[0])
destination_file = os.path.join(
destination_dir, "AdvancedApiReflection.smali"
)
with open(destination_file, "w", encoding="utf-8") as api_reflection_smali:
reflection_code = util.get_advanced_api_reflection_smali_code().replace(
"#!code_to_replace!#", obfuscator_smali_code
)
api_reflection_smali.write(reflection_code)
except Exception as e:
self.logger.error(
'Error during execution of "{0}" obfuscator: {1}'.format(
self.__class__.__name__, e
)
)
raise
finally:
obfuscation_info.used_obfuscators.append(self.__class__.__name__)
|
data_processing/filter_aichallenge2018_only_single_person.py
|
tucan9389/mobile-pose-estimation-for-TF2
| 122 |
141628
|
<filename>data_processing/filter_aichallenge2018_only_single_person.py
import os, json, shutil
# coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_train_20170909/keypoint_train_annotations_20170909.json"
# image_source_dir_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_train_20170909/keypoint_train_images_20170902"
# dst_coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/train/annotation.json"
# dst_image_dir_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/train/images"
image_source_dir_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_validation_20170911/keypoint_validation_images_20170911"
coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018/ai_challenger_keypoint_validation_20170911/keypoint_validation_annotations_20170911.json"
dst_coco_annotation_json_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/valid/annotation.json"
dst_image_dir_path = "/home/centos/datasets/ai_challenger_2018_single_person_only/valid/images"
# ================================================================================================
# ================================================================================================
# ================================================================================================
with open(coco_annotation_json_path, 'r') as f:
annotation_json_info = json.loads(f.read())
# {'url': 'http://www.sinaimg.cn/dy/slidenews/4_img/2013_24/704_997547_218968.jpg', 'image_id': 'd8eeddddcc042544a2570d4c452778b912726720', 'keypoint_annotations': {'human3': [0, 0, 3, 0, 0, 3, 0, 0, 3, 67, 279, 1, 87, 365, 1, 65, 345, 1, 0, 0, 3, 0, 0, 3, 0, 0, 3, 40, 454, 1, 44, 554, 1, 0, 0, 3, 20, 179, 1, 17, 268, 1], 'human2': [444, 259, 1, 474, 375, 2, 451, 459, 1, 577, 231, 1, 632, 396, 1, 589, 510, 1, 490, 538, 1, 0, 0, 3, 0, 0, 3, 581, 535, 2, 0, 0, 3, 0, 0, 3, 455, 78, 1, 486, 205, 1], 'human1': [308, 306, 1, 290, 423, 1, 298, 528, 1, 433, 297, 1, 440, 404, 1, 447, 501, 2, 342, 530, 1, 0, 0, 3, 0, 0, 3, 417, 520, 1, 0, 0, 3, 0, 0, 3, 376, 179, 1, 378, 281, 1]}, 'human_annotations': {'human3': [0, 169, 114, 633], 'human2': [407, 59, 665, 632], 'human1': [265, 154, 461, 632]}}
print(annotation_json_info[0])
image_path = os.path.join(image_source_dir_path, f"{annotation_json_info[0]['image_id']}.jpg")
print(os.path.exists(image_path))
exit(0)
# dict_keys(['info', 'licenses', 'images', 'annotations', 'categories'])
print(annotation_json_info.keys())
print()
print()
# [{'supercategory': 'person', 'id': 1, 'name': 'person', 'keypoints': ['nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle'], 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]}]
print(annotation_json_info['categories'])
print()
print()
# {'description': 'COCO 2017 Dataset', 'url': 'http://cocodataset.org', 'version': '1.0', 'year': 2017, 'contributor': 'COCO Consortium', 'date_created': '2017/09/01'}
print(annotation_json_info['info'])
print()
print()
# [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nc/2.0/', 'id': 2, 'name': 'Attribution-NonCommercial License'}, {'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/', 'id': 3, 'name': 'Attribution-NonCommercial-NoDerivs License'}, {'url': 'http://creativecommons.org/licenses/by/2.0/', 'id': 4, 'name': 'Attribution License'}, {'url': 'http://creativecommons.org/licenses/by-sa/2.0/', 'id': 5, 'name': 'Attribution-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nd/2.0/', 'id': 6, 'name': 'Attribution-NoDerivs License'}, {'url': 'http://flickr.com/commons/usage/', 'id': 7, 'name': 'No known copyright restrictions'}, {'url': 'http://www.usa.gov/copyright.shtml', 'id': 8, 'name': 'United States Government Work'}]
print("annotation_info['licenses']:\n", annotation_json_info['licenses'])
exit(0)
image_infos = annotation_json_info['images']
annotation_infos = annotation_json_info['annotations']
print()
print("="*80)
print(annotation_infos[0])
# dict_keys(['segmentation', 'num_keypoints', 'area', 'iscrowd', 'keypoints', 'image_id', 'bbox', 'category_id', 'id'])
print(annotation_infos[0].keys())
annotation_infos_by_image_id = {}
for annotation_info in annotation_infos:
image_id = annotation_info['image_id']
if image_id in annotation_infos_by_image_id:
annotation_infos_by_image_id[image_id].append(annotation_info)
else:
annotation_infos_by_image_id[image_id] = [annotation_info]
image_ids = list(annotation_infos_by_image_id.keys())
maximum_anntated_num = max(list(map(lambda image_id: len(annotation_infos_by_image_id[image_id]), image_ids)))
minimum_anntated_num = min(list(map(lambda image_id: len(annotation_infos_by_image_id[image_id]), image_ids)))
print("max:", maximum_anntated_num, "min:", minimum_anntated_num)
print()
pnum_and_count = list(map(lambda num: (num, len(list(filter(lambda image_id: len(annotation_infos_by_image_id[image_id]) == num, image_ids)))), range(minimum_anntated_num, maximum_anntated_num+1)))
for person_num, image_num in pnum_and_count:
print("", person_num, "->", image_num)
"""train
max: 20 min: 1
1 -> 24832
2 -> 10730
3 -> 5889
4 -> 3889
5 -> 2726
6 -> 2104
7 -> 1691
8 -> 1411
9 -> 1238
10 -> 1198
11 -> 1226
12 -> 1137
13 -> 1323
14 -> 4705
15 -> 12
16 -> 2
17 -> 0
18 -> 1
19 -> 0
20 -> 1
"""
"""valid
max: 14 min: 1
1 -> 1045
2 -> 436
3 -> 268
4 -> 148
5 -> 119
6 -> 110
7 -> 67
8 -> 37
9 -> 60
10 -> 64
11 -> 44
12 -> 38
13 -> 47
14 -> 210
"""
print("=" * 80)
image_id_to_image_info = {}
for image_info in image_infos:
image_id_to_image_info[image_info['id']] = image_info
print("=" * 80)
single_person_image_ids = list(filter(lambda image_id: len(annotation_infos_by_image_id[image_id]) == 1, image_ids))
print(len(single_person_image_ids))
print()
sample_annotaiton_json_path = "/home/centos/datasets/ai_challenger_tucan9389/valid/annotation.json"
with open(sample_annotaiton_json_path, 'r') as f:
s_annotation_json_info = json.loads(f.read())
print("images num of ai_challenger_tucan9389/valid/annotation.json:", len(s_annotation_json_info['images']))
print("annots num of ai_challenger_tucan9389/valid/annotation.json:", len(s_annotation_json_info['annotations']))
print()
sample_annotaiton_json_path = "/home/centos/datasets/ai_challenger_tucan9389/train/annotation.json"
with open(sample_annotaiton_json_path, 'r') as f:
s_annotation_json_info = json.loads(f.read())
print("images num of ai_challenger_tucan9389/train/annotation.json:", len(s_annotation_json_info['images']))
print("annots num of ai_challenger_tucan9389/train/annotation.json:", len(s_annotation_json_info['annotations']))
print()
"""
images num of ai_challenger_tucan9389/valid/annotation.json: 1500
annots num of ai_challenger_tucan9389/valid/annotation.json: 1500
images num of ai_challenger_tucan9389/train/annotation.json: 22446
annots num of ai_challenger_tucan9389/train/annotation.json: 22446
"""
# dict_keys(['images', 'annotations', 'categories'])
print(s_annotation_json_info.keys())
# {'license': 4, 'file_name': '000000397133.jpg', 'coco_url': 'http://images.cocodataset.org/val2017/000000397133.jpg', 'height': 427, 'width': 640, 'date_captured': '2013-11-14 17:02:52', 'flickr_url': 'http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg', 'id': 397133}
print(image_infos[0])
# {'file_name': '89faeae39d8dd03468085095452789e632bc9096.jpg', 'height': 681, 'width': 490, 'id': 0}
print(s_annotation_json_info['images'][0])
filtered_json_annotation_info = {}
filtered_json_annotation_info['categories'] = annotation_json_info['categories']
# image_infos
filtered_image_infos = list(map(lambda image_id: image_id_to_image_info[image_id], single_person_image_ids))
filtered_json_annotation_info['images'] = filtered_image_infos
print(len(filtered_image_infos))
# annotation_infos
filterted_annotation_infos = list(map(lambda image_id: annotation_infos_by_image_id[image_id][0], single_person_image_ids))
filtered_json_annotation_info['annotations'] = filterted_annotation_infos
print(len(filterted_annotation_infos))
print()
print("images num of new:", len(filtered_json_annotation_info['images']))
print("annots num of new:", len(filtered_json_annotation_info['annotations']))
"""valid
images num of new: 1045
annots num of new: 1045
"""
"""train
images num of new: 24832
annots num of new: 24832
"""
# ================================================================================================
# ================================================================================================
# ================================================================================================
for image_info in filtered_json_annotation_info['images']:
if not os.path.exists(os.path.join(image_source_dir_path, image_info['file_name'])):
print(f"ERR: no image file in {os.path.join(image_source_dir_path, image_info['file_name'])}")
exit(0)
print("============ NO error for file existing check ============")
print()
if not os.path.exists("/home/centos/datasets"):
os.mkdir("/home/centos/datasets")
if not os.path.exists("/home/centos/datasets/coco_single_person_only"):
os.mkdir("/home/centos/datasets/coco_single_person_only")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/train"):
os.mkdir("/home/centos/datasets/coco_single_person_only/train")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/train/images"):
os.mkdir("/home/centos/datasets/coco_single_person_only/train/images")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/valid"):
os.mkdir("/home/centos/datasets/coco_single_person_only/valid")
if not os.path.exists("/home/centos/datasets/coco_single_person_only/valid/images"):
os.mkdir("/home/centos/datasets/coco_single_person_only/valid/images")
# write annotation.json
print("=" * 80)
print("=" * 80)
print(f"WRITE START AT {dst_coco_annotation_json_path}")
with open(dst_coco_annotation_json_path, 'w') as fp:
json.dump(filtered_json_annotation_info, fp)
print(f"WRITE END AT {dst_coco_annotation_json_path}")
print("=" * 80)
print("=" * 80)
print()
# copy image files
echo_num = 100
pass_num = 0
copy_num = 0
total_num = len(filtered_json_annotation_info['images'])
print(f"START COPYING {total_num} FILES")
for idx, image_info in enumerate(filtered_json_annotation_info['images']):
src_image_path = os.path.join(image_source_dir_path, image_info['file_name'])
dst_image_path = os.path.join(dst_image_dir_path, image_info['file_name'])
if not os.path.exists(dst_image_path):
shutil.copyfile(src_image_path, dst_image_path)
copy_num += 1
else:
pass_num += 1
if (idx+1) % echo_num == 0:
print(f" >> {idx+1} / {total_num}, copy:{copy_num}, pass:{pass_num}")
print(f"END COPYING {total_num} FILES, copy:{copy_num}, pass:{pass_num}")
print("=" * 80)
print("=" * 80)
|
Compare Frontmost Fonts/Compare Composites of Two Frontmost Fonts.py
|
justanotherfoundry/Glyphs-Scripts
| 283 |
141642
|
<reponame>justanotherfoundry/Glyphs-Scripts
#MenuTitle: Compare Composites
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Reports diverging component structures of composite glyphs, e.g., iacute built with acutecomb in one font, and acutecomb.narrow in the other.
"""
Font1 = Glyphs.font
Font2 = Glyphs.fonts[1]
filePath1 = "~/"+Font1.filepath.relativePathFromBaseDirPath_("~")
filePath2 = "~/"+Font2.filepath.relativePathFromBaseDirPath_("~")
fileName1 = Font1.filepath.lastPathComponent()
fileName2 = Font2.filepath.lastPathComponent()
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
print("Comparing composites:\nFont 1: %s\nFont 2: %s\n\nFont 1: %s\nFont 2: %s\n" % ( fileName1, fileName2, filePath1, filePath2 ))
for g1 in [g for g in Font1.glyphs if g.export]:
glyphname = g1.name
g2 = Font2.glyphs[glyphname]
if g2:
for mi, m1 in enumerate(Font1.masters):
m2 = Font2.masters[mi]
l1 = g1.layers[m1.id]
l2 = g2.layers[m2.id]
composite1 = "+".join([c.componentName for c in l1.components])
composite2 = "+".join([c.componentName for c in l2.components])
if composite1 != composite2:
print("/%s : %s <> %s" % (glyphname, composite1, composite2))
else:
print(" %s not in ‘%s’" % (glyphname, fileName2))
|
PyPortal_EZ_Make_Oven/codecalibrate/code.py
|
gamblor21/Adafruit_Learning_System_Guides
| 665 |
141649
|
import time
import sys
import board
import busio
import digitalio
from adafruit_mcp9600 import MCP9600
SENSOR_ADDR = 0X67
i2c = busio.I2C(board.SCL, board.SDA,frequency=200000)
try:
sensor = MCP9600(i2c,SENSOR_ADDR,"K")
except ValueError as e:
print(e)
print("Unable to connect to the thermocouple sensor.")
sys.exit(1)
oven = digitalio.DigitalInOut(board.D4)
oven.direction = digitalio.Direction.OUTPUT
def oven_control(enable=False):
#board.D4
oven.value = enable
check_temp = 100
print("This program will determine calibration settings ")
print("for your oven to use with the EZ Make Oven.\n\n")
for i in range(10):
print("Calibration will start in %d seconds..." % (10-i))
time.sleep(1)
print("Starting...")
print("Calibrating oven temperature to %d C" % check_temp)
finish = False
oven_control(True)
maxloop=300
counter = 0
while not finish:
time.sleep(1)
counter += 1
current_temp = sensor.temperature
print("%.02f C" % current_temp)
if current_temp >= check_temp:
finish = True
oven_control(False)
if counter >= maxloop:
raise Exception("Oven not working or bad sensor")
print("checking oven lag time and temperature")
finish = False
start_time = time.monotonic()
start_temp = sensor.temperature
last_temp = start_temp
while not finish:
time.sleep(1)
current_temp = sensor.temperature
print(current_temp)
if current_temp <= last_temp:
finish = True
last_temp = current_temp
lag_temp = last_temp - check_temp
lag_time = int(time.monotonic() - start_time)
print("** Calibration Results **")
print("Modify config.json with these values for your oven:")
print("calibrate_temp:", lag_temp)
print("calibrate_seconds:",lag_time)
|