file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
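Each row below is one fill-in-the-middle (FIM) sample: a source file is split into a prefix, a masked middle, and a suffix, and `fim_type` records how the middle was chosen (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). As a minimal sketch of how such a row becomes a training string (the `<PRE>`/`<SUF>`/`<MID>` sentinel names here are illustrative assumptions, not tokens defined by this dataset):

```python
# Minimal sketch: assemble one dataset row into a PSM-order FIM string.
# The sentinel strings are placeholders; real models define their own
# special tokens for prefix/suffix/middle.
def build_fim_example(prefix: str, middle: str, suffix: str) -> str:
    # PSM order: the model conditions on prefix + suffix, predicts middle.
    return f"<PRE>{prefix}<SUF>{suffix}<MID>{middle}"

row = {
    "prefix": "import pytoolkit as tk\nmodule = tk.applications.darknet53\ndef ",
    "middle": "test_model",  # fim_type == "identifier_name"
    "suffix": "():\n    model = module.create(input_shape=(256, 256, 3), weights=None)\n",
}
print(build_fim_example(row["prefix"], row["middle"], row["suffix"]))
```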
product.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
class product_product(models.Model):
_inherit = 'product.product'
is_outlet = fields.Boolean('Is outlet', compute='_is_outlet')
normal_product_id = fields.Many2one('product.product', 'normal product')
outlet_product_ids = fields.One2many('product.product',
'normal_product_id',
'Outlet products')
@api.one
def _is_outlet(self):
outlet_cat = self.env.ref('product_outlet.product_category_outlet')
if self.categ_id == outlet_cat or \
self.categ_id.parent_id == outlet_cat:
self.is_outlet = True
else:
self.is_outlet = False
@api.model
def cron_update_outlet_price(self):
ou | tlet_categ_ids = []
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o1').id)
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o2').id)
outlet_products = self.env['product.product'].search([('categ_id', 'in', outlet_categ_ids),
('normal_product_id.list_price', '!=', 0)],
order="id desc")
for product_o in outlet_products:
origin_product = product_o.normal_product_id
price_outlet = origin_product.list_price * (1 - product_o.categ_id.percent / 100)
price_outlet2 = origin_product.list_price2 * (1 - product_o.categ_id.percent / 100)
price_outlet3 = origin_product.list_price3 * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd = origin_product.pvd1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd2 = origin_product.pvd2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd3 = origin_product.pvd3_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi = origin_product.pvi1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi2 = origin_product.pvi2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi3 = origin_product.pvi3_price * (1 - product_o.categ_id.percent / 100)
if round(product_o.list_price, 2) != round(price_outlet, 2) or \
round(product_o.list_price2, 2) != round(price_outlet2, 2) or \
round(product_o.list_price3, 2) != round(price_outlet3, 2) or \
round(product_o.pvd1_price, 2) != round(price_outlet_pvd, 2) or \
round(product_o.pvd2_price, 2) != round(price_outlet_pvd2, 2) or \
round(product_o.pvd3_price, 2) != round(price_outlet_pvd3, 2) or \
round(product_o.pvi1_price, 2) != round(price_outlet_pvi, 2) or \
round(product_o.pvi2_price, 2) != round(price_outlet_pvi2, 2) or \
round(product_o.pvi3_price, 2) != round(price_outlet_pvi3, 2) or \
round(product_o.commercial_cost, 2) != round(origin_product.commercial_cost, 2):
# update all prices
values = {
'standard_price': price_outlet,
'list_price': price_outlet,
'list_price2': price_outlet2,
'list_price3': price_outlet3,
'pvd1_price': price_outlet_pvd,
'pvd2_price': price_outlet_pvd2,
'pvd3_price': price_outlet_pvd3,
'pvi1_price': price_outlet_pvi,
'pvi2_price': price_outlet_pvi2,
'pvi3_price': price_outlet_pvi3,
'commercial_cost': origin_product.commercial_cost,
}
product_o.write(values)
| identifier_body |
|
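The `cron_update_outlet_price` job above recomputes every outlet price as the origin product's price discounted by the outlet category's `percent`, and writes only when some rounded value actually changed. A standalone sketch of that rule (field and attribute names follow the sample above):

```python
# Sketch of the price rule in cron_update_outlet_price above.
def outlet_price(origin_price: float, category_percent: float) -> float:
    # percent=30 turns a 100.0 origin price into 70.0
    return origin_price * (1 - category_percent / 100)

def needs_update(current: float, computed: float) -> bool:
    # Compare at 2 decimals, as the cron does, to avoid float noise.
    return round(current, 2) != round(computed, 2)

assert round(outlet_price(100.0, 30), 2) == 70.0
assert needs_update(69.99, outlet_price(100.0, 30))
assert not needs_update(70.0, outlet_price(100.0, 30))
```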
darknet53_test.py | import pytoolkit as tk
module = tk.applications.darknet53
def | ():
model = module.create(input_shape=(256, 256, 3), weights=None)
assert tuple(module.get_1_over_1(model).shape[1:3]) == (256, 256)
assert tuple(module.get_1_over_2(model).shape[1:3]) == (128, 128)
assert tuple(module.get_1_over_4(model).shape[1:3]) == (64, 64)
assert tuple(module.get_1_over_8(model).shape[1:3]) == (32, 32)
assert tuple(module.get_1_over_16(model).shape[1:3]) == (16, 16)
assert tuple(module.get_1_over_32(model).shape[1:3]) == (8, 8)
def test_save_load(tmpdir):
model = module.create(input_shape=(256, 256, 3), weights=None)
tk.models.save(model, str(tmpdir / "model.h5"))
tk.models.load(str(tmpdir / "model.h5"))
| test_model | identifier_name |
darknet53_test.py | import pytoolkit as tk
module = tk.applications.darknet53
def test_model():
model = module.create(input_shape=(256, 256, 3), weights=None)
assert tuple(module.get_1_over_1(model).shape[1:3]) == (256, 256)
assert tuple(module.get_1_over_2(model).shape[1:3]) == (128, 128)
assert tuple(module.get_1_over_4(model).shape[1:3]) == (64, 64)
assert tuple(module.get_1_over_8(model).shape[1:3]) == (32, 32)
assert tuple(module.get_1_over_16(model).shape[1:3]) == (16, 16)
assert tuple(module.get_1_over_32(model).shape[1:3]) == (8, 8)
| def test_save_load(tmpdir):
model = module.create(input_shape=(256, 256, 3), weights=None)
tk.models.save(model, str(tmpdir / "model.h5"))
tk.models.load(str(tmpdir / "model.h5")) | random_line_split |
|
darknet53_test.py | import pytoolkit as tk
module = tk.applications.darknet53
def test_model():
|
def test_save_load(tmpdir):
model = module.create(input_shape=(256, 256, 3), weights=None)
tk.models.save(model, str(tmpdir / "model.h5"))
tk.models.load(str(tmpdir / "model.h5"))
| model = module.create(input_shape=(256, 256, 3), weights=None)
assert tuple(module.get_1_over_1(model).shape[1:3]) == (256, 256)
assert tuple(module.get_1_over_2(model).shape[1:3]) == (128, 128)
assert tuple(module.get_1_over_4(model).shape[1:3]) == (64, 64)
assert tuple(module.get_1_over_8(model).shape[1:3]) == (32, 32)
assert tuple(module.get_1_over_16(model).shape[1:3]) == (16, 16)
assert tuple(module.get_1_over_32(model).shape[1:3]) == (8, 8) | identifier_body |
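All three splits above come from the same test file, whose asserts encode a single rule: the `get_1_over_N` feature map of the backbone halves the spatial resolution at each downsampling stage, so its height and width equal the input size divided by N. The same expectation written independently of `pytoolkit` (plain arithmetic, no assumed API):

```python
# The shape rule behind the asserts: input 256x256, stride N -> 256/N per side.
input_size = 256
for stride in (1, 2, 4, 8, 16, 32):
    side = input_size // stride
    assert side * stride == input_size
    print(f"1/{stride}: {side}x{side}")  # 256x256, 128x128, ..., 8x8
```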
1624396968492-InitMigration.ts | import {MigrationInterface, QueryRunner} from 'typeorm'
export class InitMigration1624396968492 implements MigrationInterface {
name = 'InitMigration1624396968492'
public async up(queryRunner: QueryRunner): Promise<void> |
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`DROP TABLE "workout_set"`)
await queryRunner.query(`DROP TABLE "exercise"`)
await queryRunner.query(`DROP TABLE "user"`)
}
}
| {
await queryRunner.query(
`CREATE TABLE "user" ("uuid" varchar PRIMARY KEY NOT NULL, "email" varchar NOT NULL, "password" varchar NOT NULL, "firstName" varchar NOT NULL, "lastName" varchar NOT NULL, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), CONSTRAINT "UQ_e12875dfb3b1d92d7d7c5377e22" UNIQUE ("email"))`
)
await queryRunner.query(
`CREATE TABLE "exercise" ("uuid" varchar PRIMARY KEY NOT NULL, "name" varchar NOT NULL, "description" text, "hasRepetitions" boolean NOT NULL DEFAULT (0), "hasWeight" boolean NOT NULL DEFAULT (0), "hasTime" boolean NOT NULL DEFAULT (0), "hasDistance" boolean NOT NULL DEFAULT (0), "muscle" varchar NOT NULL, "isCardio" boolean NOT NULL DEFAULT (0), "isMachine" boolean NOT NULL DEFAULT (0), "isDumbbell" boolean NOT NULL DEFAULT (0), "isBarbell" boolean NOT NULL DEFAULT (0), "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), "userUuid" varchar NOT NULL, CONSTRAINT "UQ_4420597915e901ab5d6f2bcaee4" UNIQUE ("name"), CONSTRAINT "FK_d20f00d3eabbfb7ff5f49b52e6b" FOREIGN KEY ("userUuid") REFERENCES "user" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION)`
)
await queryRunner.query(
`CREATE TABLE "workout_set" ("uuid" varchar PRIMARY KEY NOT NULL, "repetitions" integer, "weight" integer, "time" varchar, "distance" integer, "notes" varchar, "executedAt" datetime NOT NULL, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), "exerciseUuid" varchar NOT NULL, "userUuid" varchar NOT NULL, CONSTRAINT "FK_17ec7b6063fd05e55ec90554530" FOREIGN KEY ("exerciseUuid") REFERENCES "exercise" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT "FK_14877facf6d67175f772588959b" FOREIGN KEY ("userUuid") REFERENCES "user" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION)`
)
} | identifier_body |
1624396968492-InitMigration.ts | import {MigrationInterface, QueryRunner} from 'typeorm'
export class | implements MigrationInterface {
name = 'InitMigration1624396968492'
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`CREATE TABLE "user" ("uuid" varchar PRIMARY KEY NOT NULL, "email" varchar NOT NULL, "password" varchar NOT NULL, "firstName" varchar NOT NULL, "lastName" varchar NOT NULL, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), CONSTRAINT "UQ_e12875dfb3b1d92d7d7c5377e22" UNIQUE ("email"))`
)
await queryRunner.query(
`CREATE TABLE "exercise" ("uuid" varchar PRIMARY KEY NOT NULL, "name" varchar NOT NULL, "description" text, "hasRepetitions" boolean NOT NULL DEFAULT (0), "hasWeight" boolean NOT NULL DEFAULT (0), "hasTime" boolean NOT NULL DEFAULT (0), "hasDistance" boolean NOT NULL DEFAULT (0), "muscle" varchar NOT NULL, "isCardio" boolean NOT NULL DEFAULT (0), "isMachine" boolean NOT NULL DEFAULT (0), "isDumbbell" boolean NOT NULL DEFAULT (0), "isBarbell" boolean NOT NULL DEFAULT (0), "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), "userUuid" varchar NOT NULL, CONSTRAINT "UQ_4420597915e901ab5d6f2bcaee4" UNIQUE ("name"), CONSTRAINT "FK_d20f00d3eabbfb7ff5f49b52e6b" FOREIGN KEY ("userUuid") REFERENCES "user" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION)`
)
await queryRunner.query(
`CREATE TABLE "workout_set" ("uuid" varchar PRIMARY KEY NOT NULL, "repetitions" integer, "weight" integer, "time" varchar, "distance" integer, "notes" varchar, "executedAt" datetime NOT NULL, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), "exerciseUuid" varchar NOT NULL, "userUuid" varchar NOT NULL, CONSTRAINT "FK_17ec7b6063fd05e55ec90554530" FOREIGN KEY ("exerciseUuid") REFERENCES "exercise" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT "FK_14877facf6d67175f772588959b" FOREIGN KEY ("userUuid") REFERENCES "user" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION)`
)
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`DROP TABLE "workout_set"`)
await queryRunner.query(`DROP TABLE "exercise"`)
await queryRunner.query(`DROP TABLE "user"`)
}
}
| InitMigration1624396968492 | identifier_name |
1624396968492-InitMigration.ts | import {MigrationInterface, QueryRunner} from 'typeorm'
export class InitMigration1624396968492 implements MigrationInterface {
name = 'InitMigration1624396968492' | )
await queryRunner.query(
`CREATE TABLE "exercise" ("uuid" varchar PRIMARY KEY NOT NULL, "name" varchar NOT NULL, "description" text, "hasRepetitions" boolean NOT NULL DEFAULT (0), "hasWeight" boolean NOT NULL DEFAULT (0), "hasTime" boolean NOT NULL DEFAULT (0), "hasDistance" boolean NOT NULL DEFAULT (0), "muscle" varchar NOT NULL, "isCardio" boolean NOT NULL DEFAULT (0), "isMachine" boolean NOT NULL DEFAULT (0), "isDumbbell" boolean NOT NULL DEFAULT (0), "isBarbell" boolean NOT NULL DEFAULT (0), "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), "userUuid" varchar NOT NULL, CONSTRAINT "UQ_4420597915e901ab5d6f2bcaee4" UNIQUE ("name"), CONSTRAINT "FK_d20f00d3eabbfb7ff5f49b52e6b" FOREIGN KEY ("userUuid") REFERENCES "user" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION)`
)
await queryRunner.query(
`CREATE TABLE "workout_set" ("uuid" varchar PRIMARY KEY NOT NULL, "repetitions" integer, "weight" integer, "time" varchar, "distance" integer, "notes" varchar, "executedAt" datetime NOT NULL, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), "exerciseUuid" varchar NOT NULL, "userUuid" varchar NOT NULL, CONSTRAINT "FK_17ec7b6063fd05e55ec90554530" FOREIGN KEY ("exerciseUuid") REFERENCES "exercise" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION, CONSTRAINT "FK_14877facf6d67175f772588959b" FOREIGN KEY ("userUuid") REFERENCES "user" ("uuid") ON DELETE NO ACTION ON UPDATE NO ACTION)`
)
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`DROP TABLE "workout_set"`)
await queryRunner.query(`DROP TABLE "exercise"`)
await queryRunner.query(`DROP TABLE "user"`)
}
} |
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`CREATE TABLE "user" ("uuid" varchar PRIMARY KEY NOT NULL, "email" varchar NOT NULL, "password" varchar NOT NULL, "firstName" varchar NOT NULL, "lastName" varchar NOT NULL, "createdAt" datetime NOT NULL DEFAULT (datetime('now')), "updatedAt" datetime NOT NULL DEFAULT (datetime('now')), CONSTRAINT "UQ_e12875dfb3b1d92d7d7c5377e22" UNIQUE ("email"))` | random_line_split |
curve.rs | /// (c) David Alan Gilbert <[email protected]> 2016
/// Licensed under GPLv3, see the LICENSE file for a full copy
// A curve to interpolate between points
// This is currently a Quadratic Bezier; pretty simple.
use point_line::Pointf;
pub struct | {
pub start : Pointf,
pub control : Pointf,
pub end : Pointf,
}
// From https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B.C3.A9zier_curves
fn quad_interp(t: f64, s: f64, c: f64, e: f64) -> f64 {
(1.0-t)*(1.0-t)*s + 2.0*(1.0-t)*t*c + t*t*e
}
// Reworking the above to give c as the answer and specifying a
// t and result
fn find_control(s: f64, m: f64, e: f64, mid_t: f64) -> f64 {
(m-mid_t*mid_t*e-(1.0-mid_t)*(1.0-mid_t)*s) /
(2.0 * (1.0 - mid_t) * mid_t)
}
impl Bezierq {
// Return a point on the curve; t is 0 (start) -> 1 (end)
pub fn interp(&self, t: f64) -> Pointf {
Pointf { x: quad_interp(t, self.start.x, self.control.x, self.end.x),
y: quad_interp(t, self.start.y, self.control.y, self.end.y) }
}
// Return a curve that passes through the given points
// the 'mid' point happens at the specified 't' interpolation point
pub fn through(s: Pointf, m: Pointf, e: Pointf, mid_t: f64) -> Bezierq {
Bezierq { start: s, end: e,
control: Pointf {
x: find_control(s.x, m.x, e.x, mid_t),
y: find_control(s.y, m.y, e.y, mid_t) } }
}
}
| Bezierq | identifier_name |
curve.rs | /// (c) David Alan Gilbert <[email protected]> 2016
/// Licensed under GPLv3, see the LICENSE file for a full copy
// A curve to interpolate between points
// This is currently a Quadratic Bezier; pretty simple.
use point_line::Pointf;
pub struct Bezierq {
pub start : Pointf,
pub control : Pointf,
pub end : Pointf,
}
// From https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B.C3.A9zier_curves
fn quad_interp(t: f64, s: f64, c: f64, e: f64) -> f64 {
(1.0-t)*(1.0-t)*s + 2.0*(1.0-t)*t*c + t*t*e
}
// Reworking the above to give c as the answer and specifying a
// t and result
fn find_control(s: f64, m: f64, e: f64, mid_t: f64) -> f64 {
(m-mid_t*mid_t*e-(1.0-mid_t)*(1.0-mid_t)*s) /
(2.0 * (1.0 - mid_t) * mid_t)
}
impl Bezierq {
// Return a point on the curve; t is 0 (start) -> 1 (end)
pub fn interp(&self, t: f64) -> Pointf {
Pointf { x: quad_interp(t, self.start.x, self.control.x, self.end.x),
y: quad_interp(t, self.start.y, self.control.y, self.end.y) }
}
// Return a curve that passes through the given points
// the 'mid' point happens at the specified 't' interpolation point
pub fn through(s: Pointf, m: Pointf, e: Pointf, mid_t: f64) -> Bezierq {
Bezierq { start: s, end: e,
control: Pointf { | } | x: find_control(s.x, m.x, e.x, mid_t),
y: find_control(s.y, m.y, e.y, mid_t) } }
} | random_line_split |
test_versions.py | import json
import pathlib
import re
import pytest
import snafu.versions
version_paths = list(snafu.versions.VERSIONS_DIR_PATH.iterdir())
version_names = [p.stem for p in version_paths]
@pytest.mark.parametrize('path', version_paths, ids=version_names)
def test_version_definitions(path):
assert path.suffix == '.json', '{} has wrong extension'.format(path)
assert re.match(r'^\d\.\d(?:\-32)?$', path.stem), \
'{} has invalid name'.format(path)
with path.open() as f:
data = json.load(f)
schema = data.pop('type')
possible_types = snafu.versions.InstallerType.__members__
assert schema in possible_types
assert isinstance(data.pop('version_info'), list)
if schema == 'cpython_msi':
for key in ('x86', 'amd64'):
d = data.pop(key)
assert d.pop('url')
assert re.match(r'^[a-f\d]{32}$', d.pop('md5_sum'))
elif schema == 'cpython':
assert data.pop('url')
assert re.match(r'^[a-f\d]{32}$', data.pop('md5_sum'))
assert not data, 'superfluous keys: {}'.format(', '.join(data.keys()))
def test_get_version_cpython_msi():
version = snafu.versions.get_version('3.4', force_32=False)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.amd64.msi',
md5_sum='963f67116935447fad73e09cc561c713',
version_info=(3, 4, 4),
)
def test_get_version_cpython_msi_switch():
version = snafu.versions.get_version('3.4', force_32=True)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.msi',
md5_sum='e96268f7042d2a3d14f7e23b2535738b',
version_info=(3, 4, 4),
)
def test_get_version_cpython():
version = snafu.versions.get_version('3.5', force_32=False)
assert version == snafu.versions.CPythonVersion(
name='3.5',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe',
md5_sum='4276742a4a75a8d07260f13fe956eec4',
version_info=(3, 5, 4),
)
def test_get_version_cpython_switch():
version = snafu.versions.get_version('3.5', force_32=True)
assert version == snafu.versions.CPythonVersion(
name='3.5-32',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4.exe',
md5_sum='9693575358f41f452d03fd33714f223f',
version_info=(3, 5, 4),
forced_32=True,
)
def test_get_version_not_found():
with pytest.raises(snafu.versions.VersionNotFoundError) as ctx:
snafu.versions.get_version('2.8', force_32=False)
assert str(ctx.value) == '2.8'
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, 'Python 3.6'),
('3.6', True, 'Python 3.6-32'),
('3.4', False, 'Python 3.4'),
('3.4', True, 'Python 3.4'),
])
def | (name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert str(version) == result
@pytest.mark.parametrize('name, force_32, cmd', [
('3.6', False, 'python3.exe'),
('3.6', True, 'python3.exe'),
('2.7', False, 'python2.exe'),
('2.7', True, 'python2.exe'),
])
def test_python_major_command(mocker, name, force_32, cmd):
mocker.patch.object(snafu.versions, 'configs', **{
'get_scripts_dir_path.return_value': pathlib.Path(),
})
version = snafu.versions.get_version(name, force_32=force_32)
assert version.python_major_command == pathlib.Path(cmd)
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, '3.6'),
('3.6', True, '3.6'),
('3.4', False, '3.4'),
('3.4', True, '3.4'),
])
def test_arch_free_name(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.arch_free_name == result
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, {'3.6'}),
('3.6', True, {'3.6', '3.6-32'}),
('3.6-32', False, {'3.6-32'}),
('3.4', False, {'3.4'}),
('3.4', True, {'3.4'}),
])
def test_script_version_names(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.script_version_names == result
def test_is_installed(tmpdir, mocker):
mock_metadata = mocker.patch.object(snafu.versions, 'metadata', **{
'get_install_path.return_value': pathlib.Path(str(tmpdir)),
})
version = snafu.versions.get_version('3.6', force_32=False)
assert version.is_installed()
mock_metadata.get_install_path.assert_called_once_with('3.6')
| test_str | identifier_name |
test_versions.py | import json
import pathlib
import re
import pytest
import snafu.versions
version_paths = list(snafu.versions.VERSIONS_DIR_PATH.iterdir())
version_names = [p.stem for p in version_paths]
@pytest.mark.parametrize('path', version_paths, ids=version_names)
def test_version_definitions(path):
assert path.suffix == '.json', '{} has wrong extension'.format(path)
assert re.match(r'^\d\.\d(?:\-32)?$', path.stem), \
'{} has invalid name'.format(path)
with path.open() as f:
data = json.load(f)
schema = data.pop('type')
possible_types = snafu.versions.InstallerType.__members__
assert schema in possible_types
assert isinstance(data.pop('version_info'), list)
if schema == 'cpython_msi':
for key in ('x86', 'amd64'):
d = data.pop(key)
assert d.pop('url')
assert re.match(r'^[a-f\d]{32}$', d.pop('md5_sum'))
elif schema == 'cpython':
|
assert not data, 'superfluous keys: {}'.format(', '.join(data.keys()))
def test_get_version_cpython_msi():
version = snafu.versions.get_version('3.4', force_32=False)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.amd64.msi',
md5_sum='963f67116935447fad73e09cc561c713',
version_info=(3, 4, 4),
)
def test_get_version_cpython_msi_switch():
version = snafu.versions.get_version('3.4', force_32=True)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.msi',
md5_sum='e96268f7042d2a3d14f7e23b2535738b',
version_info=(3, 4, 4),
)
def test_get_version_cpython():
version = snafu.versions.get_version('3.5', force_32=False)
assert version == snafu.versions.CPythonVersion(
name='3.5',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe',
md5_sum='4276742a4a75a8d07260f13fe956eec4',
version_info=(3, 5, 4),
)
def test_get_version_cpython_switch():
version = snafu.versions.get_version('3.5', force_32=True)
assert version == snafu.versions.CPythonVersion(
name='3.5-32',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4.exe',
md5_sum='9693575358f41f452d03fd33714f223f',
version_info=(3, 5, 4),
forced_32=True,
)
def test_get_version_not_found():
with pytest.raises(snafu.versions.VersionNotFoundError) as ctx:
snafu.versions.get_version('2.8', force_32=False)
assert str(ctx.value) == '2.8'
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, 'Python 3.6'),
('3.6', True, 'Python 3.6-32'),
('3.4', False, 'Python 3.4'),
('3.4', True, 'Python 3.4'),
])
def test_str(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert str(version) == result
@pytest.mark.parametrize('name, force_32, cmd', [
('3.6', False, 'python3.exe'),
('3.6', True, 'python3.exe'),
('2.7', False, 'python2.exe'),
('2.7', True, 'python2.exe'),
])
def test_python_major_command(mocker, name, force_32, cmd):
mocker.patch.object(snafu.versions, 'configs', **{
'get_scripts_dir_path.return_value': pathlib.Path(),
})
version = snafu.versions.get_version(name, force_32=force_32)
assert version.python_major_command == pathlib.Path(cmd)
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, '3.6'),
('3.6', True, '3.6'),
('3.4', False, '3.4'),
('3.4', True, '3.4'),
])
def test_arch_free_name(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.arch_free_name == result
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, {'3.6'}),
('3.6', True, {'3.6', '3.6-32'}),
('3.6-32', False, {'3.6-32'}),
('3.4', False, {'3.4'}),
('3.4', True, {'3.4'}),
])
def test_script_version_names(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.script_version_names == result
def test_is_installed(tmpdir, mocker):
mock_metadata = mocker.patch.object(snafu.versions, 'metadata', **{
'get_install_path.return_value': pathlib.Path(str(tmpdir)),
})
version = snafu.versions.get_version('3.6', force_32=False)
assert version.is_installed()
mock_metadata.get_install_path.assert_called_once_with('3.6')
| assert data.pop('url')
assert re.match(r'^[a-f\d]{32}$', data.pop('md5_sum')) | conditional_block |
test_versions.py | import json
import pathlib
import re
import pytest
import snafu.versions
version_paths = list(snafu.versions.VERSIONS_DIR_PATH.iterdir())
version_names = [p.stem for p in version_paths]
@pytest.mark.parametrize('path', version_paths, ids=version_names)
def test_version_definitions(path):
assert path.suffix == '.json', '{} has wrong extension'.format(path)
assert re.match(r'^\d\.\d(?:\-32)?$', path.stem), \
'{} has invalid name'.format(path)
with path.open() as f:
data = json.load(f)
schema = data.pop('type')
possible_types = snafu.versions.InstallerType.__members__
assert schema in possible_types
assert isinstance(data.pop('version_info'), list)
if schema == 'cpython_msi':
for key in ('x86', 'amd64'):
d = data.pop(key)
assert d.pop('url')
assert re.match(r'^[a-f\d]{32}$', d.pop('md5_sum'))
elif schema == 'cpython':
assert data.pop('url')
assert re.match(r'^[a-f\d]{32}$', data.pop('md5_sum'))
assert not data, 'superfluous keys: {}'.format(', '.join(data.keys()))
def test_get_version_cpython_msi():
version = snafu.versions.get_version('3.4', force_32=False)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.amd64.msi',
md5_sum='963f67116935447fad73e09cc561c713',
version_info=(3, 4, 4),
)
def test_get_version_cpython_msi_switch():
|
def test_get_version_cpython():
version = snafu.versions.get_version('3.5', force_32=False)
assert version == snafu.versions.CPythonVersion(
name='3.5',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe',
md5_sum='4276742a4a75a8d07260f13fe956eec4',
version_info=(3, 5, 4),
)
def test_get_version_cpython_switch():
version = snafu.versions.get_version('3.5', force_32=True)
assert version == snafu.versions.CPythonVersion(
name='3.5-32',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4.exe',
md5_sum='9693575358f41f452d03fd33714f223f',
version_info=(3, 5, 4),
forced_32=True,
)
def test_get_version_not_found():
with pytest.raises(snafu.versions.VersionNotFoundError) as ctx:
snafu.versions.get_version('2.8', force_32=False)
assert str(ctx.value) == '2.8'
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, 'Python 3.6'),
('3.6', True, 'Python 3.6-32'),
('3.4', False, 'Python 3.4'),
('3.4', True, 'Python 3.4'),
])
def test_str(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert str(version) == result
@pytest.mark.parametrize('name, force_32, cmd', [
('3.6', False, 'python3.exe'),
('3.6', True, 'python3.exe'),
('2.7', False, 'python2.exe'),
('2.7', True, 'python2.exe'),
])
def test_python_major_command(mocker, name, force_32, cmd):
mocker.patch.object(snafu.versions, 'configs', **{
'get_scripts_dir_path.return_value': pathlib.Path(),
})
version = snafu.versions.get_version(name, force_32=force_32)
assert version.python_major_command == pathlib.Path(cmd)
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, '3.6'),
('3.6', True, '3.6'),
('3.4', False, '3.4'),
('3.4', True, '3.4'),
])
def test_arch_free_name(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.arch_free_name == result
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, {'3.6'}),
('3.6', True, {'3.6', '3.6-32'}),
('3.6-32', False, {'3.6-32'}),
('3.4', False, {'3.4'}),
('3.4', True, {'3.4'}),
])
def test_script_version_names(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.script_version_names == result
def test_is_installed(tmpdir, mocker):
mock_metadata = mocker.patch.object(snafu.versions, 'metadata', **{
'get_install_path.return_value': pathlib.Path(str(tmpdir)),
})
version = snafu.versions.get_version('3.6', force_32=False)
assert version.is_installed()
mock_metadata.get_install_path.assert_called_once_with('3.6')
| version = snafu.versions.get_version('3.4', force_32=True)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.msi',
md5_sum='e96268f7042d2a3d14f7e23b2535738b',
version_info=(3, 4, 4),
) | identifier_body |
test_versions.py | import json
import pathlib
import re
import pytest
import snafu.versions
version_paths = list(snafu.versions.VERSIONS_DIR_PATH.iterdir())
version_names = [p.stem for p in version_paths]
@pytest.mark.parametrize('path', version_paths, ids=version_names)
def test_version_definitions(path):
assert path.suffix == '.json', '{} has wrong extension'.format(path)
assert re.match(r'^\d\.\d(?:\-32)?$', path.stem), \
'{} has invalid name'.format(path)
with path.open() as f:
data = json.load(f)
schema = data.pop('type')
possible_types = snafu.versions.InstallerType.__members__
assert schema in possible_types
assert isinstance(data.pop('version_info'), list)
if schema == 'cpython_msi':
for key in ('x86', 'amd64'):
d = data.pop(key)
assert d.pop('url')
assert re.match(r'^[a-f\d]{32}$', d.pop('md5_sum'))
elif schema == 'cpython':
assert data.pop('url')
assert re.match(r'^[a-f\d]{32}$', data.pop('md5_sum'))
assert not data, 'superfluous keys: {}'.format(', '.join(data.keys()))
def test_get_version_cpython_msi():
version = snafu.versions.get_version('3.4', force_32=False)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.amd64.msi',
md5_sum='963f67116935447fad73e09cc561c713',
version_info=(3, 4, 4),
)
def test_get_version_cpython_msi_switch():
version = snafu.versions.get_version('3.4', force_32=True)
assert version == snafu.versions.CPythonMSIVersion(
name='3.4',
url='https://www.python.org/ftp/python/3.4.4/python-3.4.4.msi',
md5_sum='e96268f7042d2a3d14f7e23b2535738b',
version_info=(3, 4, 4),
)
def test_get_version_cpython():
version = snafu.versions.get_version('3.5', force_32=False)
assert version == snafu.versions.CPythonVersion(
name='3.5',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe',
md5_sum='4276742a4a75a8d07260f13fe956eec4',
version_info=(3, 5, 4),
)
def test_get_version_cpython_switch():
version = snafu.versions.get_version('3.5', force_32=True)
assert version == snafu.versions.CPythonVersion(
name='3.5-32',
url='https://www.python.org/ftp/python/3.5.4/python-3.5.4.exe',
md5_sum='9693575358f41f452d03fd33714f223f',
version_info=(3, 5, 4),
forced_32=True,
)
def test_get_version_not_found():
with pytest.raises(snafu.versions.VersionNotFoundError) as ctx:
snafu.versions.get_version('2.8', force_32=False)
assert str(ctx.value) == '2.8'
@pytest.mark.parametrize('name, force_32, result', [ | ('3.6', True, 'Python 3.6-32'),
('3.4', False, 'Python 3.4'),
('3.4', True, 'Python 3.4'),
])
def test_str(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert str(version) == result
@pytest.mark.parametrize('name, force_32, cmd', [
('3.6', False, 'python3.exe'),
('3.6', True, 'python3.exe'),
('2.7', False, 'python2.exe'),
('2.7', True, 'python2.exe'),
])
def test_python_major_command(mocker, name, force_32, cmd):
mocker.patch.object(snafu.versions, 'configs', **{
'get_scripts_dir_path.return_value': pathlib.Path(),
})
version = snafu.versions.get_version(name, force_32=force_32)
assert version.python_major_command == pathlib.Path(cmd)
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, '3.6'),
('3.6', True, '3.6'),
('3.4', False, '3.4'),
('3.4', True, '3.4'),
])
def test_arch_free_name(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.arch_free_name == result
@pytest.mark.parametrize('name, force_32, result', [
('3.6', False, {'3.6'}),
('3.6', True, {'3.6', '3.6-32'}),
('3.6-32', False, {'3.6-32'}),
('3.4', False, {'3.4'}),
('3.4', True, {'3.4'}),
])
def test_script_version_names(name, force_32, result):
version = snafu.versions.get_version(name, force_32=force_32)
assert version.script_version_names == result
def test_is_installed(tmpdir, mocker):
mock_metadata = mocker.patch.object(snafu.versions, 'metadata', **{
'get_install_path.return_value': pathlib.Path(str(tmpdir)),
})
version = snafu.versions.get_version('3.6', force_32=False)
assert version.is_installed()
mock_metadata.get_install_path.assert_called_once_with('3.6') | ('3.6', False, 'Python 3.6'), | random_line_split |
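Read together, the parametrized cases pin down the `force_32` naming rule: versions that ship a separate 32-bit build (3.5+) get a distinct `-32` name and an extra script alias when forced, while the 3.4-era MSI releases keep one name for both architectures. A standalone sketch of that rule as the tests describe it (inferred from the cases above, not from the snafu source):

```python
# Naming rule implied by the parametrized tests above.
def resolve_name(name: str, force_32: bool, has_32bit_variant: bool) -> str:
    # ('3.6', force_32=True) -> '3.6-32'; ('3.4', force_32=True) -> '3.4'
    if force_32 and has_32bit_variant and not name.endswith("-32"):
        return name + "-32"
    return name

assert resolve_name("3.6", True, True) == "3.6-32"
assert resolve_name("3.6", False, True) == "3.6"
assert resolve_name("3.4", True, False) == "3.4"
```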
gpu.rs | use maplit::hashmap;
use crate::GpuState;
use std::{collections::HashMap, mem};
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenHeightmapsUniforms {
pub position: [i32; 2],
pub origin: [i32; 2],
pub spacing: f32,
pub in_slot: i32,
pub out_slot: i32,
pub level_resolution: i32,
pub face: u32,
}
unsafe impl bytemuck::Zeroable for GenHeightmapsUniforms {}
unsafe impl bytemuck::Pod for GenHeightmapsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenDisplacementsUniforms {
pub node_center: [f64; 3],
pub padding0: f64,
pub origin: [i32; 2],
pub position: [i32; 2],
pub stride: i32,
pub heightmaps_slot: i32,
pub displacements_slot: i32,
pub face: i32,
pub level_resolution: u32,
}
unsafe impl bytemuck::Zeroable for GenDisplacementsUniforms {}
unsafe impl bytemuck::Pod for GenDisplacementsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenNormalsUniforms {
pub heightmaps_origin: [i32; 2],
pub heightmaps_slot: i32,
pub normals_slot: i32,
pub spacing: f32,
pub padding: [f32; 3],
}
unsafe impl bytemuck::Zeroable for GenNormalsUniforms {}
unsafe impl bytemuck::Pod for GenNormalsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenMaterialsUniforms {
pub heightmaps_origin: [i32; 2],
pub parent_origin: [u32; 2],
pub heightmaps_slot: i32,
pub normals_slot: i32,
pub albedo_slot: i32,
pub parent_slot: i32,
pub spacing: f32,
pub padding: i32,
}
unsafe impl bytemuck::Zeroable for GenMaterialsUniforms {}
unsafe impl bytemuck::Pod for GenMaterialsUniforms {}
pub(crate) struct ComputeShader<U> {
shader: rshader::ShaderSet,
bindgroup_pipeline: Option<(wgpu::BindGroup, wgpu::ComputePipeline)>,
uniforms: Option<wgpu::Buffer>,
name: String,
_phantom: std::marker::PhantomData<U>,
}
#[allow(unused)]
impl<U: bytemuck::Pod> ComputeShader<U> {
pub fn new(shader: rshader::ShaderSource, name: String) -> Self {
Self {
shader: rshader::ShaderSet::compute_only(shader).unwrap(),
bindgroup_pipeline: None,
uniforms: None,
name,
_phantom: std::marker::PhantomData,
}
}
pub fn refresh(&mut self) -> bool {
if self.shader.refresh() {
self.bindgroup_pipeline = None;
true
} else {
false
}
}
pub fn run(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder, | dimensions: (u32, u32, u32),
uniforms: &U,
) {
if self.uniforms.is_none() {
self.uniforms = Some(device.create_buffer(&wgpu::BufferDescriptor {
size: mem::size_of::<U>() as u64,
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
mapped_at_creation: false,
label: Some(&format!("buffer.{}.uniforms", self.name)),
}));
}
if self.bindgroup_pipeline.is_none() {
let (bind_group, bind_group_layout) = state.bind_group_for_shader(
device,
&self.shader,
hashmap!["ubo".into() => (false, wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: self.uniforms.as_ref().unwrap(),
offset: 0,
size: None,
}))],
HashMap::new(),
&format!("bindgroup.{}", self.name),
);
self.bindgroup_pipeline = Some((
bind_group,
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
layout: Some(&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: [&bind_group_layout][..].into(),
push_constant_ranges: &[],
label: Some(&format!("pipeline.{}.layout", self.name)),
})),
module: &device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(&format!("shader.{}", self.name)),
source: wgpu::ShaderSource::SpirV(self.shader.compute().into()),
flags: wgpu::ShaderFlags::empty(),
}),
entry_point: "main",
label: Some(&format!("pipeline.{}", self.name)),
}),
));
}
let staging = device.create_buffer(&wgpu::BufferDescriptor {
size: mem::size_of::<U>() as u64,
usage: wgpu::BufferUsage::COPY_SRC,
label: Some(&format!("buffer.temporary.{}.upload", self.name)),
mapped_at_creation: true,
});
let mut buffer_view = staging.slice(..).get_mapped_range_mut();
buffer_view[..mem::size_of::<U>()].copy_from_slice(bytemuck::bytes_of(uniforms));
drop(buffer_view);
staging.unmap();
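// Staging upload: the uniforms were written on the CPU into a mapped
// COPY_SRC buffer; the copy below moves them into the long-lived UNIFORM
// buffer on the GPU timeline before the compute pass binds and reads it.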
encoder.copy_buffer_to_buffer(
&staging,
0,
self.uniforms.as_ref().unwrap(),
0,
mem::size_of::<U>() as u64,
);
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
cpass.set_pipeline(&self.bindgroup_pipeline.as_ref().unwrap().1);
cpass.set_bind_group(0, &self.bindgroup_pipeline.as_ref().unwrap().0, &[]);
cpass.dispatch(dimensions.0, dimensions.1, dimensions.2);
}
} | state: &GpuState, | random_line_split |
gpu.rs | use maplit::hashmap;
use crate::GpuState;
use std::{collections::HashMap, mem};
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenHeightmapsUniforms {
pub position: [i32; 2],
pub origin: [i32; 2],
pub spacing: f32,
pub in_slot: i32,
pub out_slot: i32,
pub level_resolution: i32,
pub face: u32,
}
unsafe impl bytemuck::Zeroable for GenHeightmapsUniforms {}
unsafe impl bytemuck::Pod for GenHeightmapsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenDisplacementsUniforms {
pub node_center: [f64; 3],
pub padding0: f64,
pub origin: [i32; 2],
pub position: [i32; 2],
pub stride: i32,
pub heightmaps_slot: i32,
pub displacements_slot: i32,
pub face: i32,
pub level_resolution: u32,
}
unsafe impl bytemuck::Zeroable for GenDisplacementsUniforms {}
unsafe impl bytemuck::Pod for GenDisplacementsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenNormalsUniforms {
pub heightmaps_origin: [i32; 2],
pub heightmaps_slot: i32,
pub normals_slot: i32,
pub spacing: f32,
pub padding: [f32; 3],
}
unsafe impl bytemuck::Zeroable for GenNormalsUniforms {}
unsafe impl bytemuck::Pod for GenNormalsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenMaterialsUniforms {
pub heightmaps_origin: [i32; 2],
pub parent_origin: [u32; 2],
pub heightmaps_slot: i32,
pub normals_slot: i32,
pub albedo_slot: i32,
pub parent_slot: i32,
pub spacing: f32,
pub padding: i32,
}
unsafe impl bytemuck::Zeroable for GenMaterialsUniforms {}
unsafe impl bytemuck::Pod for GenMaterialsUniforms {}
pub(crate) struct ComputeShader<U> {
shader: rshader::ShaderSet,
bindgroup_pipeline: Option<(wgpu::BindGroup, wgpu::ComputePipeline)>,
uniforms: Option<wgpu::Buffer>,
name: String,
_phantom: std::marker::PhantomData<U>,
}
#[allow(unused)]
impl<U: bytemuck::Pod> ComputeShader<U> {
pub fn new(shader: rshader::ShaderSource, name: String) -> Self {
Self {
shader: rshader::ShaderSet::compute_only(shader).unwrap(),
bindgroup_pipeline: None,
uniforms: None,
name,
_phantom: std::marker::PhantomData,
}
}
pub fn | (&mut self) -> bool {
if self.shader.refresh() {
self.bindgroup_pipeline = None;
true
} else {
false
}
}
pub fn run(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
state: &GpuState,
dimensions: (u32, u32, u32),
uniforms: &U,
) {
if self.uniforms.is_none() {
self.uniforms = Some(device.create_buffer(&wgpu::BufferDescriptor {
size: mem::size_of::<U>() as u64,
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
mapped_at_creation: false,
label: Some(&format!("buffer.{}.uniforms", self.name)),
}));
}
if self.bindgroup_pipeline.is_none() {
let (bind_group, bind_group_layout) = state.bind_group_for_shader(
device,
&self.shader,
hashmap!["ubo".into() => (false, wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: self.uniforms.as_ref().unwrap(),
offset: 0,
size: None,
}))],
HashMap::new(),
&format!("bindgroup.{}", self.name),
);
self.bindgroup_pipeline = Some((
bind_group,
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
layout: Some(&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: [&bind_group_layout][..].into(),
push_constant_ranges: &[],
label: Some(&format!("pipeline.{}.layout", self.name)),
})),
module: &device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(&format!("shader.{}", self.name)),
source: wgpu::ShaderSource::SpirV(self.shader.compute().into()),
flags: wgpu::ShaderFlags::empty(),
}),
entry_point: "main",
label: Some(&format!("pipeline.{}", self.name)),
}),
));
}
let staging = device.create_buffer(&wgpu::BufferDescriptor {
size: mem::size_of::<U>() as u64,
usage: wgpu::BufferUsage::COPY_SRC,
label: Some(&format!("buffer.temporary.{}.upload", self.name)),
mapped_at_creation: true,
});
let mut buffer_view = staging.slice(..).get_mapped_range_mut();
buffer_view[..mem::size_of::<U>()].copy_from_slice(bytemuck::bytes_of(uniforms));
drop(buffer_view);
staging.unmap();
encoder.copy_buffer_to_buffer(
&staging,
0,
self.uniforms.as_ref().unwrap(),
0,
mem::size_of::<U>() as u64,
);
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
cpass.set_pipeline(&self.bindgroup_pipeline.as_ref().unwrap().1);
cpass.set_bind_group(0, &self.bindgroup_pipeline.as_ref().unwrap().0, &[]);
cpass.dispatch(dimensions.0, dimensions.1, dimensions.2);
}
}
| refresh | identifier_name |
gpu.rs | use maplit::hashmap;
use crate::GpuState;
use std::{collections::HashMap, mem};
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenHeightmapsUniforms {
pub position: [i32; 2],
pub origin: [i32; 2],
pub spacing: f32,
pub in_slot: i32,
pub out_slot: i32,
pub level_resolution: i32,
pub face: u32,
}
unsafe impl bytemuck::Zeroable for GenHeightmapsUniforms {}
unsafe impl bytemuck::Pod for GenHeightmapsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenDisplacementsUniforms {
pub node_center: [f64; 3],
pub padding0: f64,
pub origin: [i32; 2],
pub position: [i32; 2],
pub stride: i32,
pub heightmaps_slot: i32,
pub displacements_slot: i32,
pub face: i32,
pub level_resolution: u32,
}
unsafe impl bytemuck::Zeroable for GenDisplacementsUniforms {}
unsafe impl bytemuck::Pod for GenDisplacementsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenNormalsUniforms {
pub heightmaps_origin: [i32; 2],
pub heightmaps_slot: i32,
pub normals_slot: i32,
pub spacing: f32,
pub padding: [f32; 3],
}
unsafe impl bytemuck::Zeroable for GenNormalsUniforms {}
unsafe impl bytemuck::Pod for GenNormalsUniforms {}
#[repr(C)]
#[derive(Copy, Clone)]
pub(crate) struct GenMaterialsUniforms {
pub heightmaps_origin: [i32; 2],
pub parent_origin: [u32; 2],
pub heightmaps_slot: i32,
pub normals_slot: i32,
pub albedo_slot: i32,
pub parent_slot: i32,
pub spacing: f32,
pub padding: i32,
}
unsafe impl bytemuck::Zeroable for GenMaterialsUniforms {}
unsafe impl bytemuck::Pod for GenMaterialsUniforms {}
pub(crate) struct ComputeShader<U> {
shader: rshader::ShaderSet,
bindgroup_pipeline: Option<(wgpu::BindGroup, wgpu::ComputePipeline)>,
uniforms: Option<wgpu::Buffer>,
name: String,
_phantom: std::marker::PhantomData<U>,
}
#[allow(unused)]
impl<U: bytemuck::Pod> ComputeShader<U> {
pub fn new(shader: rshader::ShaderSource, name: String) -> Self {
Self {
shader: rshader::ShaderSet::compute_only(shader).unwrap(),
bindgroup_pipeline: None,
uniforms: None,
name,
_phantom: std::marker::PhantomData,
}
}
pub fn refresh(&mut self) -> bool {
if self.shader.refresh() {
self.bindgroup_pipeline = None;
true
} else |
}
pub fn run(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
state: &GpuState,
dimensions: (u32, u32, u32),
uniforms: &U,
) {
if self.uniforms.is_none() {
self.uniforms = Some(device.create_buffer(&wgpu::BufferDescriptor {
size: mem::size_of::<U>() as u64,
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
mapped_at_creation: false,
label: Some(&format!("buffer.{}.uniforms", self.name)),
}));
}
if self.bindgroup_pipeline.is_none() {
let (bind_group, bind_group_layout) = state.bind_group_for_shader(
device,
&self.shader,
hashmap!["ubo".into() => (false, wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: self.uniforms.as_ref().unwrap(),
offset: 0,
size: None,
}))],
HashMap::new(),
&format!("bindgroup.{}", self.name),
);
self.bindgroup_pipeline = Some((
bind_group,
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
layout: Some(&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: [&bind_group_layout][..].into(),
push_constant_ranges: &[],
label: Some(&format!("pipeline.{}.layout", self.name)),
})),
module: &device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(&format!("shader.{}", self.name)),
source: wgpu::ShaderSource::SpirV(self.shader.compute().into()),
flags: wgpu::ShaderFlags::empty(),
}),
entry_point: "main",
label: Some(&format!("pipeline.{}", self.name)),
}),
));
}
let staging = device.create_buffer(&wgpu::BufferDescriptor {
size: mem::size_of::<U>() as u64,
usage: wgpu::BufferUsage::COPY_SRC,
label: Some(&format!("buffer.temporary.{}.upload", self.name)),
mapped_at_creation: true,
});
let mut buffer_view = staging.slice(..).get_mapped_range_mut();
buffer_view[..mem::size_of::<U>()].copy_from_slice(bytemuck::bytes_of(uniforms));
drop(buffer_view);
staging.unmap();
encoder.copy_buffer_to_buffer(
&staging,
0,
self.uniforms.as_ref().unwrap(),
0,
mem::size_of::<U>() as u64,
);
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
cpass.set_pipeline(&self.bindgroup_pipeline.as_ref().unwrap().1);
cpass.set_bind_group(0, &self.bindgroup_pipeline.as_ref().unwrap().0, &[]);
cpass.dispatch(dimensions.0, dimensions.1, dimensions.2);
}
}
| {
false
} | conditional_block |
mod.rs | //! Data structures related to the `/proc/<pid>/*` files
//!
//! The `Process` struct can load everything about a running process, and
//! provides some aggregate data about them.
mod cmd_line;
mod stat;
use std::fmt;
use crate::linux::{Jiffies, Ratio, PAGESIZE};
use crate::procfs::Result;
pub use self::cmd_line::CmdLine;
pub use self::stat::{Stat, State};
/// Information about a running process
#[derive(Clone, PartialEq, Eq, Debug, Default)]
pub struct Process {
/// The stat info for a process
pub stat: Stat,
/// The command line, as revealed by the /proc fs
pub cmdline: CmdLine,
}
impl Process {
pub fn from_pid<P: fmt::Display + Copy>(p: P) -> Result<Process> {
Ok(Process {
stat: Stat::from_pid(p)?,
cmdline: CmdLine::from_pid(p)?,
})
}
pub fn useful_cmdline(&self) -> String {
let cmd = self.cmdline.display();
if cmd.is_empty() | else {
cmd
}
}
/// What percent this process is using
///
/// First argument should be in bytes.
pub fn percent_ram(&self, of_bytes: usize) -> f64 {
pages_to_bytes(self.stat.rss) as f64 / of_bytes as f64 * 100.0
}
/// Compare this process's cpu utilization since the process representing the start time
///
/// The passed-in `start_process` is the time that we are comparing
/// against: `self` should be newer.
///
/// The `total_cpu` is how many Jiffies have passed on the cpu over the
/// same time period.
///
/// # Panics
/// If the total_cpu on start_process is higher than the total_cpu on current.
pub fn cpu_utilization_since<'a>(
&'a self,
start_process: &'a Process,
total_cpu: Jiffies,
) -> ProcessCpuUsage<'a> {
let (start_ps, end_ps) = (&start_process.stat, &self.stat);
if end_ps.utime < start_ps.utime || end_ps.stime < start_ps.stime {
panic!("End process is before start process (arguments called in wrong order)");
}
let user = 100.0
* (end_ps.utime - start_ps.utime)
.duration()
.ratio(&total_cpu.duration());
let sys = 100.0
* (end_ps.stime - start_ps.stime)
.duration()
.ratio(&total_cpu.duration());
ProcessCpuUsage {
process: &start_process,
upercent: user,
spercent: sys,
total: user + sys,
}
}
}
/// Represent the percent CPU utilization of a specific process over a specific
/// time period
///
/// This is generated by loading `RunningProcs` twice and diffing them.
#[derive(Debug)]
pub struct ProcessCpuUsage<'a> {
/// The process we're reporting on
pub process: &'a Process,
/// Percent time spent in user mode
pub upercent: f64,
/// Percent time spent in system mode
pub spercent: f64,
/// upercent + spercent
pub total: f64,
}
fn pages_to_bytes(pages: u64) -> u64 {
pages * (*PAGESIZE)
}
| {
self.stat.comm.clone()
} | conditional_block |
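`cpu_utilization_since` is a two-snapshot diff: sample a process's `utime`/`stime` jiffies twice, divide each delta by the jiffies the whole CPU accumulated over the same window, and scale by 100. A hedged Python sketch of the same sampling pattern against raw procfs (field positions follow proc(5); it mirrors, rather than reproduces, the Rust above):

```python
# Two-snapshot process CPU%, in the style of cpu_utilization_since.
import time

def pid_jiffies(pid: int) -> tuple[int, int]:
    # In /proc/<pid>/stat, utime and stime are fields 14 and 15
    # (1-indexed); split after the parenthesised comm field.
    with open(f"/proc/{pid}/stat") as f:
        rest = f.read().rsplit(")", 1)[1].split()
    return int(rest[11]), int(rest[12])  # utime, stime

def total_jiffies() -> int:
    # First line of /proc/stat: aggregate jiffies across all CPUs.
    with open("/proc/stat") as f:
        return sum(int(x) for x in f.readline().split()[1:])

def cpu_percent(pid: int, interval: float = 1.0) -> float:
    (u0, s0), t0 = pid_jiffies(pid), total_jiffies()
    time.sleep(interval)
    (u1, s1), t1 = pid_jiffies(pid), total_jiffies()
    return 100.0 * ((u1 - u0) + (s1 - s0)) / (t1 - t0)
```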
mod.rs | //! Data structures related to the `/proc/<pid>/*` files
//!
//! The `Process` struct can load everything about a running process, and
//! provides some aggregate data about them.
mod cmd_line;
mod stat;
| use crate::procfs::Result;
pub use self::cmd_line::CmdLine;
pub use self::stat::{Stat, State};
/// Information about a running process
#[derive(Clone, PartialEq, Eq, Debug, Default)]
pub struct Process {
/// The stat info for a process
pub stat: Stat,
/// The command line, as revealed by the /proc fs
pub cmdline: CmdLine,
}
impl Process {
pub fn from_pid<P: fmt::Display + Copy>(p: P) -> Result<Process> {
Ok(Process {
stat: Stat::from_pid(p)?,
cmdline: CmdLine::from_pid(p)?,
})
}
pub fn useful_cmdline(&self) -> String {
let cmd = self.cmdline.display();
if cmd.is_empty() {
self.stat.comm.clone()
} else {
cmd
}
}
/// What percent this process is using
///
/// First argument should be in bytes.
pub fn percent_ram(&self, of_bytes: usize) -> f64 {
pages_to_bytes(self.stat.rss) as f64 / of_bytes as f64 * 100.0
}
/// Compare this process's cpu utilization since the process representing the start time
///
/// The passed-in `start_process` is the time that we are comparing
/// against: `self` should be newer.
///
/// The `total_cpu` is how many Jiffies have passed on the cpu over the
/// same time period.
///
/// # Panics
/// If the total_cpu on start_process is higher than the total_cpu on current.
pub fn cpu_utilization_since<'a>(
&'a self,
start_process: &'a Process,
total_cpu: Jiffies,
) -> ProcessCpuUsage<'a> {
let (start_ps, end_ps) = (&start_process.stat, &self.stat);
if end_ps.utime < start_ps.utime || end_ps.stime < start_ps.stime {
panic!("End process is before start process (arguments called in wrong order)");
}
let user = 100.0
* (end_ps.utime - start_ps.utime)
.duration()
.ratio(&total_cpu.duration());
let sys = 100.0
* (end_ps.stime - start_ps.stime)
.duration()
.ratio(&total_cpu.duration());
ProcessCpuUsage {
process: &start_process,
upercent: user,
spercent: sys,
total: user + sys,
}
}
}
/// Represent the percent CPU utilization of a specific process over a specific
/// time period
///
/// This is generated by loading `RunningProcs` twice and diffing them.
#[derive(Debug)]
pub struct ProcessCpuUsage<'a> {
/// The process we're reporting on
pub process: &'a Process,
/// Percent time spent in user mode
pub upercent: f64,
/// Percent time spent in system mode
pub spercent: f64,
/// upercent + spercent
pub total: f64,
}
fn pages_to_bytes(pages: u64) -> u64 {
pages * (*PAGESIZE)
} | use std::fmt;
use crate::linux::{Jiffies, Ratio, PAGESIZE}; | random_line_split |
mod.rs | //! Data structures related to the `/proc/<pid>/*` files
//!
//! The `Process` struct can load everything about a running process, and
//! provides some aggregate data about them.
mod cmd_line;
mod stat;
use std::fmt;
use crate::linux::{Jiffies, Ratio, PAGESIZE};
use crate::procfs::Result;
pub use self::cmd_line::CmdLine;
pub use self::stat::{Stat, State};
/// Information about a running process
#[derive(Clone, PartialEq, Eq, Debug, Default)]
pub struct Process {
/// The stat info for a process
pub stat: Stat,
/// The command line, as revealed by the /proc fs
pub cmdline: CmdLine,
}
impl Process {
pub fn from_pid<P: fmt::Display + Copy>(p: P) -> Result<Process> {
Ok(Process {
stat: Stat::from_pid(p)?,
cmdline: CmdLine::from_pid(p)?,
})
}
pub fn useful_cmdline(&self) -> String {
let cmd = self.cmdline.display();
if cmd.is_empty() {
self.stat.comm.clone()
} else {
cmd
}
}
/// What percent this process is using
///
/// First argument should be in bytes.
pub fn percent_ram(&self, of_bytes: usize) -> f64 {
pages_to_bytes(self.stat.rss) as f64 / of_bytes as f64 * 100.0
}
/// Compare this process's cpu utilization since the process representing the start time
///
/// The passed-in `start_process` is the time that we are comparing
/// against: `self` should be newer.
///
/// The `total_cpu` is how many Jiffies have passed on the cpu over the
/// same time period.
///
/// # Panics
/// If the total_cpu on start_process is higher than the total_cpu on current.
pub fn | <'a>(
&'a self,
start_process: &'a Process,
total_cpu: Jiffies,
) -> ProcessCpuUsage<'a> {
let (start_ps, end_ps) = (&start_process.stat, &self.stat);
if end_ps.utime < start_ps.utime || end_ps.stime < start_ps.stime {
panic!("End process is before start process (arguments called in wrong order)");
}
let user = 100.0
* (end_ps.utime - start_ps.utime)
.duration()
.ratio(&total_cpu.duration());
let sys = 100.0
* (end_ps.stime - start_ps.stime)
.duration()
.ratio(&total_cpu.duration());
ProcessCpuUsage {
process: &start_process,
upercent: user,
spercent: sys,
total: user + sys,
}
}
}
/// Represent the percent CPU utilization of a specific process over a specific
/// time period
///
/// This is generated by loading `RunningProcs` twice and diffing them.
#[derive(Debug)]
pub struct ProcessCpuUsage<'a> {
/// The process we're reporting on
pub process: &'a Process,
/// Percent time spent in user mode
pub upercent: f64,
/// Percent time spent in system mode
pub spercent: f64,
/// upercent + spercent
pub total: f64,
}
fn pages_to_bytes(pages: u64) -> u64 {
pages * (*PAGESIZE)
}
| cpu_utilization_since | identifier_name |
convert.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import absolute_import, division, print_function
import logging
import sys
import click
import canmatrix.convert
import canmatrix.log
logger = logging.getLogger(__name__)
def get_formats():
input = ""
output = ""
for suppFormat, features in canmatrix.formats.supportedFormats.items():
if 'load' in features:
input += suppFormat + "\n"
if 'dump' in features:
output += suppFormat + "\n"
return (input, output)
@click.command()
# global switches
@click.option('-v', '--verbose', 'verbosity', count=True, default=1)
@click.option('-s', '--silent/--no-silent', is_flag=True, default=False, help="don't print status messages to stdout. (only errors)")
@click.option('-f', '--force_output', help="enforce output format, ignoring output file extension (e.g., -f csv).\nSupported formats for writing:\n" + get_formats()[1])
@click.option('-i', '--input_format', 'import_type', help="give hint for input format\nSupported formats for reading:\n" + get_formats()[0])
@click.option('--ignoreEncodingErrors/--no-ignoreEncodingErrors', 'ignoreEncodingErrors', default=False, help="ignore character encoding errors during export (dbc,dbf,sym)")
# manipulation and filter switches
@click.option('--deleteObsoleteDefines/--no-deleteObsoleteDefines', 'deleteObsoleteDefines', default=False, help="delete defines from all ECUs, frames and Signals\nExample --deleteObsoleteDefines")
@click.option('--deleteEcu', 'deleteEcu', help="delete Ecu from databases. (comma separated list)\nSyntax: --deleteEcu=myEcu,mySecondEcu")
@click.option('--renameEcu', 'renameEcu', help="rename Ecu from databases. (comma separated list)\nSyntax: --renameEcu=myOldEcu:myNewEcu,mySecondEcu:mySecondNewEcu")
@click.option('--deleteSignal', 'deleteSignal', help="delete Signal from databases. (comma separated list)\nSyntax: --deleteSignal=mySignal1,mySecondSignal")
@click.option('--renameSignal', 'renameSignal', help="rename Signal from databases. (comma separated list)\nSyntax: --renameSignal=myOldSignal:myNewSignal,mySecondSignal:mySecondNewSignal")
@click.option('--deleteZeroSignals/--no-deleteZeroSignals', 'deleteZeroSignals', default=False, help="delete zero length signals (signals with 0 bit length) from matrix\ndefault False")
@click.option('--deleteSignalAttributes', 'deleteSignalAttributes', help="delete attributes from all signals\nExample --deleteSignalAttributes GenMsgSomeVar,CycleTime")
@click.option('--deleteFrame', 'deleteFrame', help="delete Frame from databases. (comma separated list)\nSyntax: --deleteFrame=myFrame1,mySecondFrame")
@click.option('--renameFrame', 'renameFrame', help="rename Frame from databases. (comma separated list)\nSyntax: --renameFrame=myOldFrame:myNewFrame,mySecondFrame:mySecondNewFrame")
@click.option('--addFrameReceiver', 'addFrameReceiver', help="add receiver Ecu to frame(s) (comma separated list)\nSyntax: --addFrameReceiver=framename:myNewEcu,mySecondEcu:myNEWEcu")
@click.option('--changeFrameId', 'changeFrameId', help="change frame.id in database\nSyntax: --changeFrameId=oldId:newId")
@click.option('--setFrameFd', 'setFrameFd', help="set Frame from database to canfd. (comma separated list)\nSyntax: --setFrameFd=myFrame1,mySecondFrame")
@click.option('--unsetFrameFd', 'unsetFrameFd', help="set Frame from database to normal (not FD). (comma separated list)\nSyntax: --unsetFrameFd=myFrame1,mySecondFrame")
@click.option('--recalcDLC', 'recalcDLC', help="recalculate dlc; max: use maximum of stored and calculated dlc; force: force new calculated dlc")
@click.option('--skipLongDlc', 'skipLongDlc', help="skip all Frames with dlc bigger than given threshold")
@click.option('--cutLongFrames', 'cutLongFrames', help="cut all signals out of Frames with dlc bigger than given threshold")
@click.option('--deleteFrameAttributes', 'deleteFrameAttributes', help="delete attributes from all frames\nExample --deleteFrameAttributes GenMsgSomeVar,CycleTime")
@click.option('--ecus', help="Copy only given ECUs (comma separated list) to target matrix; suffix 'rx' or 'tx' for selection: Example: --ecus FirstEcu:rx,SecondEcu:tx,ThirdEcu")
@click.option('--frames', help="Copy only given Frames (comma separated list) to target matrix")
@click.option('--signals', help="Copy only given Signals (comma separated list) to target matrix just as 'free' signals without containing frame")
@click.option('--merge', help="merge additional can databases.\nSyntax: --merge filename[:ecu=SOMEECU][:frame=FRAME1][:frame=FRAME2],filename2")
# arxml switches
@click.option('--arxmlIgnoreClusterInfo/--no-arxmlIgnoreClusterInfo', 'arxmlIgnoreClusterInfo', default=False, help="Ignore any can cluster info from arxml; Import all frames in one matrix\ndefault False")
@click.option('--arxmlUseXpath/--no-arxmlUseXpath', 'arxmlUseXpath', default=False, help="Use experimental Xpath-Implementation for resolving AR-Paths; \ndefault False")
@click.option('--arxmlExportVersion', 'arVersion', default="3.2.3", help="Set output AUTOSAR version\ncurrently only 3.2.3 and 4.1.0 are supported\ndefault 3.2.3")
# dbc switches
@click.option('--dbcImportEncoding', 'dbcImportEncoding', default="iso-8859-1", help="Import charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcImportCommentEncoding', 'dbcImportCommentEncoding', default="iso-8859-1", help="Import charset of Comments in dbc\ndefault iso-8859-1")
@click.option('--dbcExportEncoding', 'dbcExportEncoding', default="iso-8859-1", help="Export charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcExportCommentEncoding', 'dbcExportCommentEncoding', default="iso-8859-1", help="Export charset of comments in dbc\ndefault iso-8859-1")
@click.option('--dbcUniqueSignalNames/--no-dbcUniqueSignalNames', 'dbcUniqueSignalNames', default=True, help="Check if signal names are unique per frame")
# dbf switches
@click.option('--dbfImportEncoding', 'dbfImportEncoding', default="iso-8859-1", help="Import charset of dbf, maybe utf-8\ndefault iso-8859-1")
@click.option('--dbfExportEncoding', 'dbfExportEncoding', default="iso-8859-1", help="Export charset of dbf, maybe utf-8\ndefault iso-8859-1")
# sym switches
@click.option('--symImportEncoding', 'symImportEncoding', default="iso-8859-1", help="Import charset of sym format, maybe utf-8\ndefault iso-8859-1")
@click.option('--symExportEncoding', 'symExportEncoding', default="iso-8859-1", help="Export charset of sym format, maybe utf-8\ndefault iso-8859-1")
# xls/csv switches
@click.option('--xlsMotorolaBitFormat', 'xlsMotorolaBitFormat', default="msbreverse", help="Excel format for startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default msbreverse")
@click.option('--additionalFrameAttributes', 'additionalFrameAttributes', default = "", help="append columns to csv/xls(x), example: is_fd")
@click.option('--additionalSignalAttributes', 'additionalSignalAttributes', default = "", help="append columns to csv/xls(x), example: is_signed,attributes[\"GenSigStartValue\"]")
@click.option('--xlsValuesInSeperateLines/--no-xlsValuesInSeperateLines', 'xlsValuesInSeperateLines', default = False, help="Excel format: create separate line for each value of signal value table\tdefault: False")
# json switches
@click.option('--jsonExportCanard/--no-jsonExportCanard', 'jsonExportCanard', default=False, help="Export Canard compatible json format")
@click.option('--jsonExportAll/--no-jsonExportAll', 'jsonExportAll', default=False, help="Export more data to json format")
@click.option('--jsonMotorolaBitFormat', 'jsonMotorolaBitFormat', default="lsb", help="Json format: startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default lsb")
@click.option('--jsonNativeTypes/--no-jsonNativeTypes', 'jsonNativeTypes', default=False, help="Uses native json representation for decimals instead of string.")
# in and out file
@click.argument('infile', required=True)
@click.argument('outfile', required=True)
#
def cli_convert(infile, outfile, silent, verbosity, **options):
|
if __name__ == '__main__':
sys.exit(cli_convert())
| """
canmatrix.cli.convert [options] import-file export-file
import-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym
export-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym|*.py
\n"""
root_logger = canmatrix.log.setup_logger()
if silent is True:
# only print error messages, ignore verbosity flag
verbosity = -1
options["silent"] = True
canmatrix.log.set_log_level(root_logger, verbosity)
if options["ignoreEncodingErrors"]:
options["ignoreEncodingErrors"] = "ignore"
else:
options["ignoreEncodingErrors"] = ""
canmatrix.convert.convert(infile, outfile, **options)
return 0 | identifier_body |
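Since cli_convert delegates to canmatrix.convert.convert(infile, outfile, **options), the same conversion can be run without the CLI. A minimal sketch — the file names are placeholders, and the keyword names are taken from the click bindings above:

import canmatrix.convert

canmatrix.convert.convert(
    "input.dbc",                     # placeholder input path
    "output.kcd",                    # placeholder output path
    dbcImportEncoding="iso-8859-1",  # same default as the CLI switch
    deleteZeroSignals=True,          # same keyword the click option binds to
)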
convert.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import absolute_import, division, print_function
import logging
import sys
import click
import canmatrix.convert
import canmatrix.formats
import canmatrix.log
logger = logging.getLogger(__name__)
def | ():
input = ""
output = ""
for suppFormat, features in canmatrix.formats.supportedFormats.items():
if 'load' in features:
input += suppFormat + "\n"
if 'dump' in features:
output += suppFormat + "\n"
return (input, output)
@click.command()
# global switches
@click.option('-v', '--verbose', 'verbosity', count=True, default=1)
@click.option('-s', '--silent/--no-silent', is_flag=True, default=False, help="don't print status messages to stdout. (only errors)")
@click.option('-f', '--force_output', help="enforce output format, ignoring output file extension (e.g., -f csv).\nSupported formats for writing:\n" + get_formats()[1])
@click.option('-i', '--input_format', 'import_type', help="give hint for input format\nSupported formats for reading:\n" + get_formats()[0])
@click.option('--ignoreEncodingErrors/--no-ignoreEncodingErrors', 'ignoreEncodingErrors', default=False, help="ignore character encoding errors during export (dbc,dbf,sym)")
# manipulation and filter switches
@click.option('--deleteObsoleteDefines/--no-deleteObsoleteDefines', 'deleteObsoleteDefines', default=False, help="delete defines from all ECUs, frames and Signals\nExample --deleteObsoleteDefines")
@click.option('--deleteEcu', 'deleteEcu', help="delete Ecu from databases. (comma separated list)\nSyntax: --deleteEcu=myEcu,mySecondEcu")
@click.option('--renameEcu', 'renameEcu', help="rename Ecu from databases. (comma separated list)\nSyntax: --renameEcu=myOldEcu:myNewEcu,mySecondEcu:mySecondNewEcu")
@click.option('--deleteSignal', 'deleteSignal', help="delete Signal from databases. (comma separated list)\nSyntax: --deleteSignal=mySignal1,mySecondSignal")
@click.option('--renameSignal', 'renameSignal', help="rename Signal from databases. (comma separated list)\nSyntax: --renameSignal=myOldSignal:myNewSignal,mySecondSignal:mySecondNewSignal")
@click.option('--deleteZeroSignals/--no-deleteZeroSignals', 'deleteZeroSignals', default=False, help="delete zero length signals (signals with 0 bit length) from matrix\ndefault False")
@click.option('--deleteSignalAttributes', 'deleteSignalAttributes', help="delete attributes from all signals\nExample --deleteSignalAttributes GenMsgSomeVar,CycleTime")
@click.option('--deleteFrame', 'deleteFrame', help="delete Frame from databases. (comma separated list)\nSyntax: --deleteFrame=myFrame1,mySecondFrame")
@click.option('--renameFrame', 'renameFrame', help="rename Frame from databases. (comma separated list)\nSyntax: --renameFrame=myOldFrame:myNewFrame,mySecondFrame:mySecondNewFrame")
@click.option('--addFrameReceiver', 'addFrameReceiver', help="add receiver Ecu to frame(s) (comma separated list)\nSyntax: --addFrameReceiver=framename:myNewEcu,mySecondEcu:myNEWEcu")
@click.option('--changeFrameId', 'changeFrameId', help="change frame.id in database\nSyntax: --changeFrameId=oldId:newId")
@click.option('--setFrameFd', 'setFrameFd', help="set Frame from database to canfd. (comma separated list)\nSyntax: --setFrameFd=myFrame1,mySecondFrame")
@click.option('--unsetFrameFd', 'unsetFrameFd', help="set Frame from database to normal (not FD). (comma separated list)\nSyntax: --unsetFrameFd=myFrame1,mySecondFrame")
@click.option('--recalcDLC', 'recalcDLC', help="recalculate dlc; max: use maximum of stored and calculated dlc; force: force new calculated dlc")
@click.option('--skipLongDlc', 'skipLongDlc', help="skip all Frames with dlc bigger than given threshold")
@click.option('--cutLongFrames', 'cutLongFrames', help="cut all signals out of Frames with dlc bigger than given threshold")
@click.option('--deleteFrameAttributes', 'deleteFrameAttributes', help="delete attributes from all frames\nExample --deleteFrameAttributes GenMsgSomeVar,CycleTime")
@click.option('--ecus', help="Copy only given ECUs (comma separated list) to target matrix; suffix 'rx' or 'tx' for selection: Example: --ecus FirstEcu:rx,SecondEcu:tx,ThirdEcu")
@click.option('--frames', help="Copy only given Frames (comma separated list) to target matrix")
@click.option('--signals', help="Copy only given Signals (comma separated list) to target matrix just as 'free' signals without containing frame")
@click.option('--merge', help="merge additional can databases.\nSyntax: --merge filename[:ecu=SOMEECU][:frame=FRAME1][:frame=FRAME2],filename2")
# arxml switches
@click.option('--arxmlIgnoreClusterInfo/--no-arxmlIgnoreClusterInfo', 'arxmlIgnoreClusterInfo', default=False, help="Ignore any can cluster info from arxml; Import all frames in one matrix\ndefault False")
@click.option('--arxmlUseXpath/--no-arxmlUseXpath', 'arxmlUseXpath', default=False, help="Use experimental Xpath-Implementation for resolving AR-Paths; \ndefault False")
@click.option('--arxmlExportVersion', 'arVersion', default="3.2.3", help="Set output AUTOSAR version\ncurrently only 3.2.3 and 4.1.0 are supported\ndefault 3.2.3")
# dbc switches
@click.option('--dbcImportEncoding', 'dbcImportEncoding', default="iso-8859-1", help="Import charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcImportCommentEncoding', 'dbcImportCommentEncoding', default="iso-8859-1", help="Import charset of Comments in dbc\ndefault iso-8859-1")
@click.option('--dbcExportEncoding', 'dbcExportEncoding', default="iso-8859-1", help="Export charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcExportCommentEncoding', 'dbcExportCommentEncoding', default="iso-8859-1", help="Export charset of comments in dbc\ndefault iso-8859-1")
@click.option('--dbcUniqueSignalNames/--no-dbcUniqueSignalNames', 'dbcUniqueSignalNames', default=True, help="Check if signal names are unique per frame")
# dbf switches
@click.option('--dbfImportEncoding', 'dbfImportEncoding', default="iso-8859-1", help="Import charset of dbf, maybe utf-8\ndefault iso-8859-1")
@click.option('--dbfExportEncoding', 'dbfExportEncoding', default="iso-8859-1", help="Export charset of dbf, maybe utf-8\ndefault iso-8859-1")
# sym switches
@click.option('--symImportEncoding', 'symImportEncoding', default="iso-8859-1", help="Import charset of sym format, maybe utf-8\ndefault iso-8859-1")
@click.option('--symExportEncoding', 'symExportEncoding', default="iso-8859-1", help="Export charset of sym format, maybe utf-8\ndefault iso-8859-1")
# xls/csv switches
@click.option('--xlsMotorolaBitFormat', 'xlsMotorolaBitFormat', default="msbreverse", help="Excel format for startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default msbreverse")
@click.option('--additionalFrameAttributes', 'additionalFrameAttributes', default = "", help="append columns to csv/xls(x), example: is_fd")
@click.option('--additionalSignalAttributes', 'additionalSignalAttributes', default = "", help="append columns to csv/xls(x), example: is_signed,attributes[\"GenSigStartValue\"]")
@click.option('--xlsValuesInSeperateLines/--no-xlsValuesInSeperateLines', 'xlsValuesInSeperateLines', default = False, help="Excel format: create separate line for each value of signal value table\tdefault: False")
# json switches
@click.option('--jsonExportCanard/--no-jsonExportCanard', 'jsonExportCanard', default=False, help="Export Canard compatible json format")
@click.option('--jsonExportAll/--no-jsonExportAll', 'jsonExportAll', default=False, help="Export more data to json format")
@click.option('--jsonMotorolaBitFormat', 'jsonMotorolaBitFormat', default="lsb", help="Json format: startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default lsb")
@click.option('--jsonNativeTypes/--no-jsonNativeTypes', 'jsonNativeTypes', default=False, help="Uses native json representation for decimals instead of string.")
# in and out file
@click.argument('infile', required=True)
@click.argument('outfile', required=True)
#
def cli_convert(infile, outfile, silent, verbosity, **options):
"""
canmatrix.cli.convert [options] import-file export-file
import-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym
export-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym|*.py
\n"""
root_logger = canmatrix.log.setup_logger()
if silent is True:
# only print error messages, ignore verbosity flag
verbosity = -1
options["silent"] = True
canmatrix.log.set_log_level(root_logger, verbosity)
if options["ignoreEncodingErrors"]:
options["ignoreEncodingErrors"] = "ignore"
else:
options["ignoreEncodingErrors"] = ""
canmatrix.convert.convert(infile, outfile, **options)
return 0
if __name__ == '__main__':
sys.exit(cli_convert())
| get_formats | identifier_name |
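get_formats() assumes canmatrix.formats.supportedFormats maps each format name to the features it supports. A self-contained sketch of that contract, using a made-up registry instead of the real one:

supportedFormats = {
    "dbc": ["load", "dump"],
    "arxml": ["load", "dump"],
    "csv": ["dump"],  # write-only in this made-up registry
}

loaders = [name for name, features in supportedFormats.items() if "load" in features]
dumpers = [name for name, features in supportedFormats.items() if "dump" in features]
print(loaders)  # ['dbc', 'arxml']
print(dumpers)  # ['dbc', 'arxml', 'csv']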
convert.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import absolute_import, division, print_function
import logging
import sys
import click
import canmatrix.convert
import canmatrix.formats
import canmatrix.log
logger = logging.getLogger(__name__)
def get_formats():
input = ""
output = ""
for suppFormat, features in canmatrix.formats.supportedFormats.items():
if 'load' in features:
input += suppFormat + "\n"
if 'dump' in features:
output += suppFormat + "\n"
return (input, output)
@click.command()
# global switches
@click.option('-v', '--verbose', 'verbosity', count=True, default=1)
@click.option('-s', '--silent/--no-silent', is_flag=True, default=False, help="don't print status messages to stdout. (only errors)")
@click.option('-f', '--force_output', help="enforce output format, ignoring output file extension (e.g., -f csv).\nSupported formats for writing:\n" + get_formats()[1])
@click.option('-i', '--input_format', 'import_type', help="give hint for input format\nSupported formats for reading:\n" + get_formats()[0])
@click.option('--ignoreEncodingErrors/--no-ignoreEncodingErrors', 'ignoreEncodingErrors', default=False, help="ignore character encoding errors during export (dbc,dbf,sym)")
# manipulation and filter switches
@click.option('--deleteObsoleteDefines/--no-deleteObsoleteDefines', 'deleteObsoleteDefines', default=False, help="delete defines from all ECUs, frames and Signals\nExample --deleteObsoleteDefines")
@click.option('--deleteEcu', 'deleteEcu', help="delete Ecu from databases. (comma separated list)\nSyntax: --deleteEcu=myEcu,mySecondEcu")
@click.option('--renameEcu', 'renameEcu', help="rename Ecu from databases. (comma separated list)\nSyntax: --renameEcu=myOldEcu:myNewEcu,mySecondEcu:mySecondNewEcu")
@click.option('--deleteSignal', 'deleteSignal', help="delete Signal from databases. (comma separated list)\nSyntax: --deleteSignal=mySignal1,mySecondSignal")
@click.option('--renameSignal', 'renameSignal', help="rename Signal from databases. (comma separated list)\nSyntax: --renameSignal=myOldSignal:myNewSignal,mySecondSignal:mySecondNewSignal")
@click.option('--deleteZeroSignals/--no-deleteZeroSignals', 'deleteZeroSignals', default=False, help="delete zero length signals (signals with 0 bit length) from matrix\ndefault False")
@click.option('--deleteSignalAttributes', 'deleteSignalAttributes', help="delete attributes from all signals\nExample --deleteSignalAttributes GenMsgSomeVar,CycleTime")
@click.option('--deleteFrame', 'deleteFrame', help="delete Frame from databases. (comma separated list)\nSyntax: --deleteFrame=myFrame1,mySecondFrame")
@click.option('--renameFrame', 'renameFrame', help="rename Frame from databases. (comma separated list)\nSyntax: --renameFrame=myOldFrame:myNewFrame,mySecondFrame:mySecondNewFrame")
@click.option('--addFrameReceiver', 'addFrameReceiver', help="add receiver Ecu to frame(s) (comma separated list)\nSyntax: --addFrameReceiver=framename:myNewEcu,mySecondEcu:myNEWEcu")
@click.option('--changeFrameId', 'changeFrameId', help="change frame.id in database\nSyntax: --changeFrameId=oldId:newId")
@click.option('--setFrameFd', 'setFrameFd', help="set Frame from database to canfd. (comma separated list)\nSyntax: --setFrameFd=myFrame1,mySecondFrame")
@click.option('--unsetFrameFd', 'unsetFrameFd', help="set Frame from database to normal (not FD). (comma separated list)\nSyntax: --unsetFrameFd=myFrame1,mySecondFrame")
@click.option('--recalcDLC', 'recalcDLC', help="recalculate dlc; max: use maximum of stored and calculated dlc; force: force new calculated dlc")
@click.option('--skipLongDlc', 'skipLongDlc', help="skip all Frames with dlc bigger than given threshold")
@click.option('--cutLongFrames', 'cutLongFrames', help="cut all signals out of Frames with dlc bigger than given threshold")
@click.option('--deleteFrameAttributes', 'deleteFrameAttributes', help="delete attributes from all frames\nExample --deleteFrameAttributes GenMsgSomeVar,CycleTime")
@click.option('--ecus', help="Copy only given ECUs (comma separated list) to target matrix; suffix 'rx' or 'tx' for selection: Example: --ecus FirstEcu:rx,SecondEcu:tx,ThirdEcu")
@click.option('--frames', help="Copy only given Frames (comma separated list) to target matrix")
@click.option('--signals', help="Copy only given Signals (comma separated list) to target matrix just as 'free' signals without containing frame")
@click.option('--merge', help="merge additional can databases.\nSyntax: --merge filename[:ecu=SOMEECU][:frame=FRAME1][:frame=FRAME2],filename2")
# arxml switches
@click.option('--arxmlIgnoreClusterInfo/--no-arxmlIgnoreClusterInfo', 'arxmlIgnoreClusterInfo', default=False, help="Ignore any can cluster info from arxml; Import all frames in one matrix\ndefault False")
@click.option('--arxmlUseXpath/--no-arxmlUseXpath', 'arxmlUseXpath', default=False, help="Use experimental Xpath-Implementation for resolving AR-Paths; \ndefault False") |
@click.option('--dbcExportEncoding', 'dbcExportEncoding', default="iso-8859-1", help="Export charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcExportCommentEncoding', 'dbcExportCommentEncoding', default="iso-8859-1", help="Export charset of comments in dbc\ndefault iso-8859-1")
@click.option('--dbcUniqueSignalNames/--no-dbcUniqueSignalNames', 'dbcUniqueSignalNames', default=True, help="Check if signal names are unique per frame")
# dbf switches
@click.option('--dbfImportEncoding', 'dbfImportEncoding', default="iso-8859-1", help="Import charset of dbf, maybe utf-8\ndefault iso-8859-1")
@click.option('--dbfExportEncoding', 'dbfExportEncoding', default="iso-8859-1", help="Export charset of dbf, maybe utf-8\ndefault iso-8859-1")
# sym switches
@click.option('--symImportEncoding', 'symImportEncoding', default="iso-8859-1", help="Import charset of sym format, maybe utf-8\ndefault iso-8859-1")
@click.option('--symExportEncoding', 'symExportEncoding', default="iso-8859-1", help="Export charset of sym format, maybe utf-8\ndefault iso-8859-1")
# xls/csv switches
@click.option('--xlsMotorolaBitFormat', 'xlsMotorolaBitFormat', default="msbreverse", help="Excel format for startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default msbreverse")
@click.option('--additionalFrameAttributes', 'additionalFrameAttributes', default = "", help="append columns to csv/xls(x), example: is_fd")
@click.option('--additionalSignalAttributes', 'additionalSignalAttributes', default = "", help="append columns to csv/xls(x), example: is_signed,attributes[\"GenSigStartValue\"]")
@click.option('--xlsValuesInSeperateLines/--no-xlsValuesInSeperateLines', 'xlsValuesInSeperateLines', default = False, help="Excel format: create separate line for each value of signal value table\tdefault: False")
# json switches
@click.option('--jsonExportCanard/--no-jsonExportCanard', 'jsonExportCanard', default=False, help="Export Canard compatible json format")
@click.option('--jsonExportAll/--no-jsonExportAll', 'jsonExportAll', default=False, help="Export more data to json format")
@click.option('--jsonMotorolaBitFormat', 'jsonMotorolaBitFormat', default="lsb", help="Json format: startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default lsb")
@click.option('--jsonNativeTypes/--no-jsonNativeTypes', 'jsonNativeTypes', default=False, help="Uses native json representation for decimals instead of string.")
# in and out file
@click.argument('infile', required=True)
@click.argument('outfile', required=True)
#
def cli_convert(infile, outfile, silent, verbosity, **options):
"""
canmatrix.cli.convert [options] import-file export-file
import-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym
export-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym|*.py
\n"""
root_logger = canmatrix.log.setup_logger()
if silent is True:
# only print error messages, ignore verbosity flag
verbosity = -1
options["silent"] = True
canmatrix.log.set_log_level(root_logger, verbosity)
if options["ignoreEncodingErrors"]:
options["ignoreEncodingErrors"] = "ignore"
else:
options["ignoreEncodingErrors"] = ""
canmatrix.convert.convert(infile, outfile, **options)
return 0
if __name__ == '__main__':
sys.exit(cli_convert()) | @click.option('--arxmlExportVersion', 'arVersion', default="3.2.3", help="Set output AUTOSAR version\ncurrently only 3.2.3 and 4.1.0 are supported\ndefault 3.2.3")
# dbc switches
@click.option('--dbcImportEncoding', 'dbcImportEncoding', default="iso-8859-1", help="Import charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcImportCommentEncoding', 'dbcImportCommentEncoding', default="iso-8859-1", help="Import charset of Comments in dbc\ndefault iso-8859-1") | random_line_split |
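One way to exercise the command without a shell is click's test runner. A sketch with placeholder file names, assuming cli_convert is importable from the module above:

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli_convert, ["--deleteZeroSignals", "in.dbc", "out.sym"])
print(result.exit_code)
print(result.output)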
convert.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import absolute_import, division, print_function
import logging
import sys
import click
import canmatrix.convert
import canmatrix.formats
import canmatrix.log
logger = logging.getLogger(__name__)
def get_formats():
input = ""
output = ""
for suppFormat, features in canmatrix.formats.supportedFormats.items():
if 'load' in features:
|
if 'dump' in features:
output += suppFormat + "\n"
return (input, output)
@click.command()
# global switches
@click.option('-v', '--verbose', 'verbosity', count=True, default=1)
@click.option('-s', '--silent/--no-silent', is_flag=True, default=False, help="don't print status messages to stdout. (only errors)")
@click.option('-f', '--force_output', help="enforce output format, ignoring output file extension (e.g., -f csv).\nSupported formats for writing:\n" + get_formats()[1])
@click.option('-i', '--input_format', 'import_type', help="give hint for input format\nSupported formats for reading:\n" + get_formats()[0])
@click.option('--ignoreEncodingErrors/--no-ignoreEncodingErrors', 'ignoreEncodingErrors', default=False, help="ignore character encoding errors during export (dbc,dbf,sym)")
# manipulation and filter switches
@click.option('--deleteObsoleteDefines/--no-deleteObsoleteDefines', 'deleteObsoleteDefines', default=False, help="delete defines from all ECUs, frames and Signals\nExample --deleteObsoleteDefines")
@click.option('--deleteEcu', 'deleteEcu', help="delete Ecu from databases. (comma separated list)\nSyntax: --deleteEcu=myEcu,mySecondEcu")
@click.option('--renameEcu', 'renameEcu', help="rename Ecu from databases. (comma separated list)\nSyntax: --renameEcu=myOldEcu:myNewEcu,mySecondEcu:mySecondNewEcu")
@click.option('--deleteSignal', 'deleteSignal', help="delete Signal from databases. (comma separated list)\nSyntax: --deleteSignal=mySignal1,mySecondSignal")
@click.option('--renameSignal', 'renameSignal', help="rename Signal from databases. (comma separated list)\nSyntax: --renameSignal=myOldSignal:myNewSignal,mySecondSignal:mySecondNewSignal")
@click.option('--deleteZeroSignals/--no-deleteZeroSignals', 'deleteZeroSignals', default=False, help="delete zero length signals (signals with 0 bit length) from matrix\ndefault False")
@click.option('--deleteSignalAttributes', 'deleteSignalAttributes', help="delete attributes from all signals\nExample --deleteSignalAttributes GenMsgSomeVar,CycleTime")
@click.option('--deleteFrame', 'deleteFrame', help="delete Frame from databases. (comma separated list)\nSyntax: --deleteFrame=myFrame1,mySecondFrame")
@click.option('--renameFrame', 'renameFrame', help="rename Frame from databases. (comma separated list)\nSyntax: --renameFrame=myOldFrame:myNewFrame,mySecondFrame:mySecondNewFrame")
@click.option('--addFrameReceiver', 'addFrameReceiver', help="add receiver Ecu to frame(s) (comma separated list)\nSyntax: --addFrameReceiver=framename:myNewEcu,mySecondEcu:myNEWEcu")
@click.option('--changeFrameId', 'changeFrameId', help="change frame.id in database\nSyntax: --changeFrameId=oldId:newId")
@click.option('--setFrameFd', 'setFrameFd', help="set Frame from database to canfd. (comma separated list)\nSyntax: --setFrameFd=myFrame1,mySecondFrame")
@click.option('--unsetFrameFd', 'unsetFrameFd', help="set Frame from database to normal (not FD). (comma separated list)\nSyntax: --unsetFrameFd=myFrame1,mySecondFrame")
@click.option('--recalcDLC', 'recalcDLC', help="recalculate dlc; max: use maximum of stored and calculated dlc; force: force new calculated dlc")
@click.option('--skipLongDlc', 'skipLongDlc', help="skip all Frames with dlc bigger than given threshold")
@click.option('--cutLongFrames', 'cutLongFrames', help="cut all signals out of Frames with dlc bigger than given threshold")
@click.option('--deleteFrameAttributes', 'deleteFrameAttributes', help="delete attributes from all frames\nExample --deleteFrameAttributes GenMsgSomeVar,CycleTime")
@click.option('--ecus', help="Copy only given ECUs (comma separated list) to target matrix; suffix 'rx' or 'tx' for selection: Example: --ecus FirstEcu:rx,SecondEcu:tx,ThirdEcu")
@click.option('--frames', help="Copy only given Frames (comma separated list) to target matrix")
@click.option('--signals', help="Copy only given Signals (comma separated list) to target matrix just as 'free' signals without containing frame")
@click.option('--merge', help="merge additional can databases.\nSyntax: --merge filename[:ecu=SOMEECU][:frame=FRAME1][:frame=FRAME2],filename2")
# arxml switches
@click.option('--arxmlIgnoreClusterInfo/--no-arxmlIgnoreClusterInfo', 'arxmlIgnoreClusterInfo', default=False, help="Ignore any can cluster info from arxml; Import all frames in one matrix\ndefault False")
@click.option('--arxmlUseXpath/--no-arxmlUseXpath', 'arxmlUseXpath', default=False, help="Use experimental Xpath-Implementation for resolving AR-Paths; \ndefault False")
@click.option('--arxmlExportVersion', 'arVersion', default="3.2.3", help="Set output AUTOSAR version\ncurrently only 3.2.3 and 4.1.0 are supported\ndefault 3.2.3")
# dbc switches
@click.option('--dbcImportEncoding', 'dbcImportEncoding', default="iso-8859-1", help="Import charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcImportCommentEncoding', 'dbcImportCommentEncoding', default="iso-8859-1", help="Import charset of Comments in dbc\ndefault iso-8859-1")
@click.option('--dbcExportEncoding', 'dbcExportEncoding', default="iso-8859-1", help="Export charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcExportCommentEncoding', 'dbcExportCommentEncoding', default="iso-8859-1", help="Export charset of comments in dbc\ndefault iso-8859-1")
@click.option('--dbcUniqueSignalNames/--no-dbcUniqueSignalNames', 'dbcUniqueSignalNames', default=True, help="Check if signal names are unique per frame")
# dbf switches
@click.option('--dbfImportEncoding', 'dbfImportEncoding', default="iso-8859-1", help="Import charset of dbf, maybe utf-8\ndefault iso-8859-1")
@click.option('--dbfExportEncoding', 'dbfExportEncoding', default="iso-8859-1", help="Export charset of dbf, maybe utf-8\ndefault iso-8859-1")
# sym switches
@click.option('--symImportEncoding', 'symImportEncoding', default="iso-8859-1", help="Import charset of sym format, maybe utf-8\ndefault iso-8859-1")
@click.option('--symExportEncoding', 'symExportEncoding', default="iso-8859-1", help="Export charset of sym format, maybe utf-8\ndefault iso-8859-1")
# xls/csv switches
@click.option('--xlsMotorolaBitFormat', 'xlsMotorolaBitFormat', default="msbreverse", help="Excel format for startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default msbreverse")
@click.option('--additionalFrameAttributes', 'additionalFrameAttributes', default = "", help="append columns to csv/xls(x), example: is_fd")
@click.option('--additionalSignalAttributes', 'additionalSignalAttributes', default = "", help="append columns to csv/xls(x), example: is_signed,attributes[\"GenSigStartValue\"]")
@click.option('--xlsValuesInSeperateLines/--no-xlsValuesInSeperateLines', 'xlsValuesInSeperateLines', default = False, help="Excel format: create separate line for each value of signal value table\tdefault: False")
# json switches
@click.option('--jsonExportCanard/--no-jsonExportCanard', 'jsonExportCanard', default=False, help="Export Canard compatible json format")
@click.option('--jsonExportAll/--no-jsonExportAll', 'jsonExportAll', default=False, help="Export more data to json format")
@click.option('--jsonMotorolaBitFormat', 'jsonMotorolaBitFormat', default="lsb", help="Json format: startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default lsb")
@click.option('--jsonNativeTypes/--no-jsonNativeTypes', 'jsonNativeTypes', default=False, help="Uses native json representation for decimals instead of string.")
# in and out file
@click.argument('infile', required=True)
@click.argument('outfile', required=True)
#
def cli_convert(infile, outfile, silent, verbosity, **options):
"""
canmatrix.cli.convert [options] import-file export-file
import-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym
export-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym|*.py
\n"""
root_logger = canmatrix.log.setup_logger()
if silent is True:
# only print error messages, ignore verbosity flag
verbosity = -1
options["silent"] = True
canmatrix.log.set_log_level(root_logger, verbosity)
if options["ignoreEncodingErrors"]:
options["ignoreEncodingErrors"] = "ignore"
else:
options["ignoreEncodingErrors"] = ""
canmatrix.convert.convert(infile, outfile, **options)
return 0
if __name__ == '__main__':
sys.exit(cli_convert())
| input += suppFormat + "\n" | conditional_block |
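Click declares paired boolean flags with a '/' separator, which is what the corrected --arxmlUseXpath/--no-arxmlUseXpath option above relies on. The pattern in isolation:

import click

@click.command()
@click.option("--use-xpath/--no-use-xpath", "use_xpath", default=False)
def demo(use_xpath):
    click.echo("xpath enabled" if use_xpath else "xpath disabled")

if __name__ == "__main__":
    demo()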
struct-partial-move-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Show)]
pub struct Partial<T> { x: T, y: T }
#[derive(PartialEq, Show)]
struct S { val: int }
impl S { fn new(v: int) -> S { S { val: v } } }
impl Drop for S { fn drop(&mut self) { } } | // `..p` moves all fields *except* `p.y` in this context.
Partial { y: f(p.y), ..p }
}
pub fn main() {
let p = f((S::new(3), S::new(4)), |S { val: z }| S::new(z+1));
assert_eq!(p, Partial { x: S::new(3), y: S::new(5) });
} |
pub fn f<T, F>((b1, b2): (T, T), mut f: F) -> Partial<T> where F: FnMut(T) -> T {
let p = Partial { x: b1, y: b2 };
// Move of `p` is legal even though we are also moving `p.y`; the | random_line_split |
struct-partial-move-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Show)]
pub struct Partial<T> { x: T, y: T }
#[derive(PartialEq, Show)]
struct S { val: int }
impl S { fn | (v: int) -> S { S { val: v } } }
impl Drop for S { fn drop(&mut self) { } }
pub fn f<T, F>((b1, b2): (T, T), mut f: F) -> Partial<T> where F: FnMut(T) -> T {
let p = Partial { x: b1, y: b2 };
// Move of `p` is legal even though we are also moving `p.y`; the
// `..p` moves all fields *except* `p.y` in this context.
Partial { y: f(p.y), ..p }
}
pub fn main() {
let p = f((S::new(3), S::new(4)), |S { val: z }| S::new(z+1));
assert_eq!(p, Partial { x: S::new(3), y: S::new(5) });
}
| new | identifier_name |
struct-partial-move-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Show)]
pub struct Partial<T> { x: T, y: T }
#[derive(PartialEq, Show)]
struct S { val: int }
impl S { fn new(v: int) -> S { S { val: v } } }
impl Drop for S { fn drop(&mut self) { } }
pub fn f<T, F>((b1, b2): (T, T), mut f: F) -> Partial<T> where F: FnMut(T) -> T |
pub fn main() {
let p = f((S::new(3), S::new(4)), |S { val: z }| S::new(z+1));
assert_eq!(p, Partial { x: S::new(3), y: S::new(5) });
}
| {
let p = Partial { x: b1, y: b2 };
// Move of `p` is legal even though we are also moving `p.y`; the
// `..p` moves all fields *except* `p.y` in this context.
Partial { y: f(p.y), ..p }
} | identifier_body |
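The Rust tests above exercise functional record update: `..p` fills in (and here moves) every field not explicitly overridden. Python has no move semantics, but dataclasses.replace is a loose analogue of the same copy-with-overrides idea:

from dataclasses import dataclass, replace

@dataclass
class Partial:
    x: int
    y: int

p = Partial(x=3, y=4)
q = replace(p, y=p.y + 1)  # copy every field from p except the overridden y
assert q == Partial(x=3, y=5)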
package.js | /* global Package, Npm */
Package.describe({
name: 'procempa:keycloak-auth',
version: '1.0.0',
summary: 'Meteor Keycloak Handshake flow',
git: 'https://github.com/Procempa/meteor-keycloak-auth.git',
documentation: 'README.md' |
Package.onUse(function(api) {
api.use('[email protected]');
api.use('[email protected]');
api.export('KeycloakServer', 'server');
api.export('KeycloakClient', 'client');
api.mainModule('client-main.js', 'client');
api.mainModule('server-main.js', 'server');
});
Npm.depends({
'lodash': '4.16.1',
'fallbackjs': '1.1.8',
'localforage': '1.4.2',
'keycloak-auth-utils': '2.2.1',
'babel-plugin-transform-decorators-legacy': '1.3.4',
'babel-plugin-transform-class-properties': '6.11.5',
'babel-plugin-transform-strict-mode': '6.11.3',
'q': '1.4.1'
}); | }); | random_line_split |
index_controller.js | /**
* Controller for single index detail
*/
import _ from 'lodash';
import uiRoutes from 'ui/routes';
import uiModules from 'ui/modules';
import routeInitProvider from 'plugins/monitoring/lib/route_init';
import ajaxErrorHandlersProvider from 'plugins/monitoring/lib/ajax_error_handler';
import template from 'plugins/monitoring/views/elasticsearch/index/index_template.html';
uiRoutes.when('/elasticsearch/indices/:index', {
template,
resolve: {
clusters: function (Private) {
const routeInit = Private(routeInitProvider);
return routeInit();
},
pageData: getPageData
}
});
function getPageData(timefilter, globalState, $route, $http, Private) {
const timeBounds = timefilter.getBounds();
const url = `../api/monitoring/v1/clusters/${globalState.cluster_uuid}/elasticsearch/indices/${$route.current.params.index}`;
return $http.post(url, {
timeRange: {
min: timeBounds.min.toISOString(),
max: timeBounds.max.toISOString()
},
metrics: [
'index_search_request_rate',
{
name: 'index_request_rate',
keys: [
'index_request_rate_total',
'index_request_rate_primary'
]
},
'index_size',
{
name: 'index_mem',
keys: [ 'index_mem_overall' ],
config: 'xpack.monitoring.chart.elasticsearch.index.index_memory'
},
'index_document_count',
'index_segment_count'
]
})
.then(response => response.data)
.catch((err) => {
const ajaxErrorHandlers = Private(ajaxErrorHandlersProvider);
return ajaxErrorHandlers(err);
});
}
const uiModule = uiModules.get('monitoring', []);
uiModule.controller('indexView', (timefilter, $route, title, Private, globalState, $executor, $http, monitoringClusters, $scope) => {
timefilter.enabled = true;
function | (clusters) {
$scope.clusters = clusters;
$scope.cluster = _.find($scope.clusters, { cluster_uuid: globalState.cluster_uuid });
}
setClusters($route.current.locals.clusters);
$scope.pageData = $route.current.locals.pageData;
$scope.indexName = $route.current.params.index;
title($scope.cluster, `Elasticsearch - Indices - ${$scope.indexName}`);
$executor.register({
execute: () => getPageData(timefilter, globalState, $route, $http, Private),
handleResponse: (response) => $scope.pageData = response
});
$executor.register({
execute: () => monitoringClusters(),
handleResponse: setClusters
});
// Start the executor
$executor.start();
// Destroy the executor
$scope.$on('$destroy', $executor.destroy);
});
| setClusters | identifier_name |
index_controller.js | /**
* Controller for single index detail
*/
import _ from 'lodash';
import uiRoutes from 'ui/routes';
import uiModules from 'ui/modules';
import routeInitProvider from 'plugins/monitoring/lib/route_init';
import ajaxErrorHandlersProvider from 'plugins/monitoring/lib/ajax_error_handler';
import template from 'plugins/monitoring/views/elasticsearch/index/index_template.html';
uiRoutes.when('/elasticsearch/indices/:index', {
template,
resolve: {
clusters: function (Private) {
const routeInit = Private(routeInitProvider);
return routeInit();
},
pageData: getPageData
}
});
function getPageData(timefilter, globalState, $route, $http, Private) {
const timeBounds = timefilter.getBounds();
const url = `../api/monitoring/v1/clusters/${globalState.cluster_uuid}/elasticsearch/indices/${$route.current.params.index}`;
return $http.post(url, {
timeRange: {
min: timeBounds.min.toISOString(),
max: timeBounds.max.toISOString()
},
metrics: [
'index_search_request_rate',
{
name: 'index_request_rate',
keys: [
'index_request_rate_total', | 'index_size',
{
name: 'index_mem',
keys: [ 'index_mem_overall' ],
config: 'xpack.monitoring.chart.elasticsearch.index.index_memory'
},
'index_document_count',
'index_segment_count'
]
})
.then(response => response.data)
.catch((err) => {
const ajaxErrorHandlers = Private(ajaxErrorHandlersProvider);
return ajaxErrorHandlers(err);
});
}
const uiModule = uiModules.get('monitoring', []);
uiModule.controller('indexView', (timefilter, $route, title, Private, globalState, $executor, $http, monitoringClusters, $scope) => {
timefilter.enabled = true;
function setClusters(clusters) {
$scope.clusters = clusters;
$scope.cluster = _.find($scope.clusters, { cluster_uuid: globalState.cluster_uuid });
}
setClusters($route.current.locals.clusters);
$scope.pageData = $route.current.locals.pageData;
$scope.indexName = $route.current.params.index;
title($scope.cluster, `Elasticsearch - Indices - ${$scope.indexName}`);
$executor.register({
execute: () => getPageData(timefilter, globalState, $route, $http, Private),
handleResponse: (response) => $scope.pageData = response
});
$executor.register({
execute: () => monitoringClusters(),
handleResponse: setClusters
});
// Start the executor
$executor.start();
// Destroy the executor
$scope.$on('$destroy', $executor.destroy);
}); | 'index_request_rate_primary'
]
}, | random_line_split |
index_controller.js | /**
* Controller for single index detail
*/
import _ from 'lodash';
import uiRoutes from 'ui/routes';
import uiModules from 'ui/modules';
import routeInitProvider from 'plugins/monitoring/lib/route_init';
import ajaxErrorHandlersProvider from 'plugins/monitoring/lib/ajax_error_handler';
import template from 'plugins/monitoring/views/elasticsearch/index/index_template.html';
uiRoutes.when('/elasticsearch/indices/:index', {
template,
resolve: {
clusters: function (Private) {
const routeInit = Private(routeInitProvider);
return routeInit();
},
pageData: getPageData
}
});
function getPageData(timefilter, globalState, $route, $http, Private) |
const uiModule = uiModules.get('monitoring', []);
uiModule.controller('indexView', (timefilter, $route, title, Private, globalState, $executor, $http, monitoringClusters, $scope) => {
timefilter.enabled = true;
function setClusters(clusters) {
$scope.clusters = clusters;
$scope.cluster = _.find($scope.clusters, { cluster_uuid: globalState.cluster_uuid });
}
setClusters($route.current.locals.clusters);
$scope.pageData = $route.current.locals.pageData;
$scope.indexName = $route.current.params.index;
title($scope.cluster, `Elasticsearch - Indices - ${$scope.indexName}`);
$executor.register({
execute: () => getPageData(timefilter, globalState, $route, $http, Private),
handleResponse: (response) => $scope.pageData = response
});
$executor.register({
execute: () => monitoringClusters(),
handleResponse: setClusters
});
// Start the executor
$executor.start();
// Destroy the executor
$scope.$on('$destroy', $executor.destroy);
});
| {
const timeBounds = timefilter.getBounds();
const url = `../api/monitoring/v1/clusters/${globalState.cluster_uuid}/elasticsearch/indices/${$route.current.params.index}`;
return $http.post(url, {
timeRange: {
min: timeBounds.min.toISOString(),
max: timeBounds.max.toISOString()
},
metrics: [
'index_search_request_rate',
{
name: 'index_request_rate',
keys: [
'index_request_rate_total',
'index_request_rate_primary'
]
},
'index_size',
{
name: 'index_mem',
keys: [ 'index_mem_overall' ],
config: 'xpack.monitoring.chart.elasticsearch.index.index_memory'
},
'index_document_count',
'index_segment_count'
]
})
.then(response => response.data)
.catch((err) => {
const ajaxErrorHandlers = Private(ajaxErrorHandlersProvider);
return ajaxErrorHandlers(err);
});
} | identifier_body |
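Outside the Angular controller, the same monitoring endpoint can be called directly. A sketch of the request getPageData builds — the URL shape and payload keys come from the code above, while the host, cluster UUID, index name, and time range are placeholders:

import requests

cluster_uuid = "abc123"   # placeholder
index_name = "my-index"   # placeholder
base = "http://localhost:5601"  # placeholder host; the controller itself uses a relative URL
url = base + "/api/monitoring/v1/clusters/%s/elasticsearch/indices/%s" % (cluster_uuid, index_name)
payload = {
    "timeRange": {"min": "2024-01-01T00:00:00.000Z", "max": "2024-01-02T00:00:00.000Z"},
    "metrics": ["index_search_request_rate", "index_size"],
}
response = requests.post(url, json=payload)
print(response.status_code)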
adb_install_apk.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import sys
from pylib import android_commands
from pylib import test_options_parser
from pylib import constants
def InstallApk(args):
|
def main(argv):
parser = optparse.OptionParser()
test_options_parser.AddBuildTypeOption(parser)
test_options_parser.AddInstallAPKOption(parser)
options, args = parser.parse_args(argv)
if len(args) > 1:
raise Exception('Error: Unknown argument:', args[1:])
devices = android_commands.GetAttachedDevices()
if not devices:
raise Exception('Error: no connected devices')
pool = multiprocessing.Pool(len(devices))
# Send a tuple (options, device) per instance of InstallApk.
pool.map(InstallApk, zip([options] * len(devices), devices))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| options, device = args
apk_path = os.path.join(os.environ['CHROME_SRC'],
'out', options.build_type,
'apks', options.apk)
result = android_commands.AndroidCommands(device=device).ManagedInstall(
apk_path, False, options.apk_package)
print '----- Installed on %s -----' % device
print result | identifier_body |
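The zip([options] * len(devices), devices) idiom pairs one shared options object with each device so pool.map can pass both through a single argument. The same pattern standalone, with stand-in values:

import multiprocessing

def work(args):
    shared, item = args  # same (options, device) unpacking as InstallApk
    return "%s -> %s" % (shared["build_type"], item)

if __name__ == "__main__":
    shared = {"build_type": "Release"}  # stand-in for the parsed options
    items = ["device1", "device2"]      # stand-in for attached devices
    pool = multiprocessing.Pool(len(items))
    print(pool.map(work, zip([shared] * len(items), items)))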
adb_install_apk.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import sys
from pylib import android_commands
from pylib import test_options_parser
from pylib import constants
def InstallApk(args):
options, device = args
apk_path = os.path.join(os.environ['CHROME_SRC'],
'out', options.build_type,
'apks', options.apk)
result = android_commands.AndroidCommands(device=device).ManagedInstall(
apk_path, False, options.apk_package)
print '----- Installed on %s -----' % device
print result
def main(argv):
parser = optparse.OptionParser()
test_options_parser.AddBuildTypeOption(parser)
test_options_parser.AddInstallAPKOption(parser)
options, args = parser.parse_args(argv)
if len(args) > 1:
|
devices = android_commands.GetAttachedDevices()
if not devices:
raise Exception('Error: no connected devices')
pool = multiprocessing.Pool(len(devices))
# Send a tuple (options, device) per instance of InstallApk.
pool.map(InstallApk, zip([options] * len(devices), devices))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| raise Exception('Error: Unknown argument:', args[1:]) | conditional_block |
adb_install_apk.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import sys
from pylib import android_commands
from pylib import test_options_parser
from pylib import constants
def InstallApk(args):
options, device = args
apk_path = os.path.join(os.environ['CHROME_SRC'],
'out', options.build_type,
'apks', options.apk)
result = android_commands.AndroidCommands(device=device).ManagedInstall(
apk_path, False, options.apk_package)
print '----- Installed on %s -----' % device
print result
def main(argv):
parser = optparse.OptionParser()
test_options_parser.AddBuildTypeOption(parser)
test_options_parser.AddInstallAPKOption(parser)
options, args = parser.parse_args(argv)
if len(args) > 1:
raise Exception('Error: Unknown argument:', args[1:])
devices = android_commands.GetAttachedDevices()
if not devices:
raise Exception('Error: no connected devices')
pool = multiprocessing.Pool(len(devices))
# Send a tuple (options, device) per instance of InstallApk. |
if __name__ == '__main__':
sys.exit(main(sys.argv)) | pool.map(InstallApk, zip([options] * len(devices), devices)) | random_line_split |
adb_install_apk.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import sys
from pylib import android_commands
from pylib import test_options_parser
from pylib import constants
def | (args):
options, device = args
apk_path = os.path.join(os.environ['CHROME_SRC'],
'out', options.build_type,
'apks', options.apk)
result = android_commands.AndroidCommands(device=device).ManagedInstall(
apk_path, False, options.apk_package)
print '----- Installed on %s -----' % device
print result
def main(argv):
parser = optparse.OptionParser()
test_options_parser.AddBuildTypeOption(parser)
test_options_parser.AddInstallAPKOption(parser)
options, args = parser.parse_args(argv)
if len(args) > 1:
raise Exception('Error: Unknown argument:', args[1:])
devices = android_commands.GetAttachedDevices()
if not devices:
raise Exception('Error: no connected devices')
pool = multiprocessing.Pool(len(devices))
# Send a tuple (options, device) per instance of InstallApk.
pool.map(InstallApk, zip([options] * len(devices), devices))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| InstallApk | identifier_name |
sdb_dump_patch.py | import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except Exception:  # stop at the first element that fails to parse
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def | (sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
| _main | identifier_name |
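GreedyVArray.vsParse is a parse-until-failure loop: it keeps consuming elements until the element parser raises or the buffer runs out. The same pattern in plain Python, with a toy fixed-width parser standing in for the vstruct element:

def parse_record(buf, offset):
    if offset + 4 > len(buf):
        raise ValueError("truncated record")
    return buf[offset:offset + 4], offset + 4

def parse_greedy(buf):
    records, offset = [], 0
    while offset < len(buf):
        try:
            record, offset = parse_record(buf, offset)
        except ValueError:
            break  # stop at the first record that fails to parse
        records.append(record)
    return records

print(parse_greedy(b"AAAABBBBCC"))  # [b'AAAA', b'BBBB']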

chokidarWatcherService.ts
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import * as chokidar from 'vscode-chokidar';
import * as fs from 'fs';
import * as gracefulFs from 'graceful-fs';
gracefulFs.gracefulify(fs);
import * as paths from 'vs/base/common/paths';
import * as glob from 'vs/base/common/glob';
import { TPromise } from 'vs/base/common/winjs.base';
import { FileChangeType } from 'vs/platform/files/common/files';
import { ThrottledDelayer } from 'vs/base/common/async';
import * as strings from 'vs/base/common/strings';
import { normalizeNFC } from 'vs/base/common/normalization';
import { realcaseSync } from 'vs/base/node/extfs';
import { isMacintosh } from 'vs/base/common/platform';
import * as watcherCommon from 'vs/workbench/services/files/node/watcher/common';
import { IWatcherRequest, IWatcherService, IWatcherOptions, IWatchError } from 'vs/workbench/services/files/node/watcher/unix/watcher';
import { Emitter, Event } from 'vs/base/common/event';
interface IWatcher {
requests: ExtendedWatcherRequest[];
stop(): any;
}
export interface IChockidarWatcherOptions {
pollingInterval?: number;
}
interface ExtendedWatcherRequest extends IWatcherRequest {
parsedPattern?: glob.ParsedPattern;
}
export class ChokidarWatcherService implements IWatcherService {
private static readonly FS_EVENT_DELAY = 50; // aggregate and only emit events when changes have stopped for this duration (in ms)
private static readonly EVENT_SPAM_WARNING_THRESHOLD = 60 * 1000; // warn after certain time span of event spam
private _watchers: { [watchPath: string]: IWatcher };
private _watcherCount: number;
private _options: IWatcherOptions & IChockidarWatcherOptions;
private spamCheckStartTime: number;
private spamWarningLogged: boolean;
private enospcErrorLogged: boolean;
private _onWatchEvent = new Emitter<watcherCommon.IRawFileChange[] | IWatchError>();
readonly onWatchEvent = this._onWatchEvent.event;
watch(options: IWatcherOptions & IChockidarWatcherOptions): Event<watcherCommon.IRawFileChange[] | IWatchError> {
this._options = options;
this._watchers = Object.create(null);
this._watcherCount = 0;
return this.onWatchEvent;
}
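	// Reconcile the requested roots with the running watchers: watchers whose
	// requests are unchanged are reused, all others are stopped and recreated.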
public setRoots(requests: IWatcherRequest[]): TPromise<void> {
const watchers = Object.create(null);
const newRequests = [];
const requestsByBasePath = normalizeRoots(requests);
// evaluate new & remaining watchers
for (let basePath in requestsByBasePath) {
let watcher = this._watchers[basePath];
if (watcher && isEqualRequests(watcher.requests, requestsByBasePath[basePath])) {
watchers[basePath] = watcher;
delete this._watchers[basePath];
} else {
newRequests.push(basePath);
}
}
// stop all old watchers
for (let path in this._watchers) {
this._watchers[path].stop();
}
// start all new watchers
for (let basePath of newRequests) {
let requests = requestsByBasePath[basePath];
watchers[basePath] = this._watch(basePath, requests);
}
this._watchers = watchers;
return TPromise.as(null);
}
// for test purposes
	public get watcherCount() {
return this._watcherCount;
}
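	// Create a single chokidar watcher for basePath; it serves every request
	// rooted under that path, with per-request filtering done in isIgnored().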
private _watch(basePath: string, requests: IWatcherRequest[]): IWatcher {
if (this._options.verboseLogging) {
			console.log(`Start watching: ${basePath}`);
}
const pollingInterval = this._options.pollingInterval || 1000;
const watcherOpts: chokidar.IOptions = {
ignoreInitial: true,
ignorePermissionErrors: true,
followSymlinks: true, // this is the default of chokidar and supports file events through symlinks
interval: pollingInterval, // while not used in normal cases, if any error causes chokidar to fallback to polling, increase its intervals
binaryInterval: pollingInterval,
disableGlobbing: true // fix https://github.com/Microsoft/vscode/issues/4586
};
		// if there's only one request, use the built-in ignore-filtering
if (requests.length === 1) {
watcherOpts.ignored = requests[0].ignored;
}
// Chokidar fails when the basePath does not match case-identical to the path on disk
// so we have to find the real casing of the path and do some path massaging to fix this
// see https://github.com/paulmillr/chokidar/issues/418
const realBasePath = isMacintosh ? (realcaseSync(basePath) || basePath) : basePath;
const realBasePathLength = realBasePath.length;
const realBasePathDiffers = (basePath !== realBasePath);
if (realBasePathDiffers) {
console.warn(`Watcher basePath does not match version on disk and was corrected (original: ${basePath}, real: ${realBasePath})`);
}
let chokidarWatcher = chokidar.watch(realBasePath, watcherOpts);
this._watcherCount++;
// Detect if for some reason the native watcher library fails to load
if (isMacintosh && !chokidarWatcher.options.useFsEvents) {
			console.error('Watcher is not using native fsevents library and is falling back to inefficient polling.');
}
let undeliveredFileEvents: watcherCommon.IRawFileChange[] = [];
let fileEventDelayer = new ThrottledDelayer(ChokidarWatcherService.FS_EVENT_DELAY);
const watcher: IWatcher = {
requests,
stop: () => {
try {
if (this._options.verboseLogging) {
						console.log(`Stop watching: ${basePath}`);
}
if (chokidarWatcher) {
chokidarWatcher.close();
this._watcherCount--;
chokidarWatcher = null;
}
if (fileEventDelayer) {
fileEventDelayer.cancel();
fileEventDelayer = null;
}
} catch (error) {
console.error(error.toString());
}
}
};
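		// Translate chokidar's raw events into IRawFileChange batches; events are
		// buffered and flushed through the delayer so bursts coalesce.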
chokidarWatcher.on('all', (type: string, path: string) => {
if (isMacintosh) {
// Mac: uses NFD unicode form on disk, but we want NFC
// See also https://github.com/nodejs/node/issues/2165
path = normalizeNFC(path);
}
if (path.indexOf(realBasePath) < 0) {
				return; // we only care about absolute paths within our basePath context
}
// Make sure to convert the path back to its original basePath form if the realpath is different
if (realBasePathDiffers) {
path = basePath + path.substr(realBasePathLength);
}
let eventType: FileChangeType;
switch (type) {
case 'change':
eventType = FileChangeType.UPDATED;
break;
case 'add':
case 'addDir':
eventType = FileChangeType.ADDED;
break;
case 'unlink':
case 'unlinkDir':
eventType = FileChangeType.DELETED;
break;
default:
return;
}
if (isIgnored(path, watcher.requests)) {
return;
}
let event = { type: eventType, path };
// Logging
if (this._options.verboseLogging) {
console.log(eventType === FileChangeType.ADDED ? '[ADDED]' : eventType === FileChangeType.DELETED ? '[DELETED]' : '[CHANGED]', path);
}
// Check for spam
const now = Date.now();
if (undeliveredFileEvents.length === 0) {
this.spamWarningLogged = false;
this.spamCheckStartTime = now;
} else if (!this.spamWarningLogged && this.spamCheckStartTime + ChokidarWatcherService.EVENT_SPAM_WARNING_THRESHOLD < now) {
this.spamWarningLogged = true;
console.warn(strings.format('Watcher is busy catching up with {0} file changes in 60 seconds. Latest changed path is "{1}"', undeliveredFileEvents.length, event.path));
}
// Add to buffer
undeliveredFileEvents.push(event);
// Delay and send buffer
fileEventDelayer.trigger(() => {
const events = undeliveredFileEvents;
undeliveredFileEvents = [];
// Broadcast to clients normalized
const res = watcherCommon.normalize(events);
this._onWatchEvent.fire(res);
// Logging
if (this._options.verboseLogging) {
res.forEach(r => {
console.log(' >> normalized', r.type === FileChangeType.ADDED ? '[ADDED]' : r.type === FileChangeType.DELETED ? '[DELETED]' : '[CHANGED]', r.path);
});
}
return TPromise.as(null);
});
});
chokidarWatcher.on('error', (error: Error) => {
if (error) {
// Specially handle ENOSPC errors that can happen when
// the watcher consumes so many file descriptors that
// we are running into a limit. We only want to warn
// once in this case to avoid log spam.
// See https://github.com/Microsoft/vscode/issues/7950
if ((<any>error).code === 'ENOSPC') {
if (!this.enospcErrorLogged) {
this.enospcErrorLogged = true;
this.stop();
this._onWatchEvent.fire({ message: 'Inotify limit reached (ENOSPC)' });
}
} else {
console.error(error.toString());
}
}
});
return watcher;
}
public stop(): TPromise<void> {
for (let path in this._watchers) {
let watcher = this._watchers[path];
watcher.stop();
}
this._watchers = Object.create(null);
return TPromise.as(void 0);
}
}
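// A path is ignored unless some watched root contains it without matching that
// root's ignore globs (each entry is expanded to '<entry>/**' and the parsed
// pattern is cached on the request).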
function isIgnored(path: string, requests: ExtendedWatcherRequest[]): boolean {
	for (let request of requests) {
		if (request.basePath === path) {
			return false;
		}
		if (paths.isEqualOrParent(path, request.basePath)) {
			if (!request.parsedPattern) {
				if (request.ignored && request.ignored.length > 0) {
					let pattern = `{${request.ignored.map(i => i + '/**').join(',')}}`;
					request.parsedPattern = glob.parse(pattern);
				} else {
					request.parsedPattern = () => false;
				}
			}
			const relPath = path.substr(request.basePath.length + 1);
			if (!request.parsedPattern(relPath)) {
				return false;
			}
		}
	}
	return true;
}
/**
 * Normalizes a set of root paths by grouping them under their top-most parent root.
 * Requests for sub-paths are dropped if they have the same ignore set as their parent.
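 *
 * Illustrative (hypothetical) input, assuming all three roots share one ignore set:
 *   roots ['/a', '/a/b', '/c']  =>  { '/a': [request('/a')], '/c': [request('/c')] }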
*/
export function normalizeRoots(requests: IWatcherRequest[]): { [basePath: string]: IWatcherRequest[] } {
requests = requests.sort((r1, r2) => r1.basePath.localeCompare(r2.basePath));
let prevRequest: IWatcherRequest = null;
let result: { [basePath: string]: IWatcherRequest[] } = Object.create(null);
for (let request of requests) {
let basePath = request.basePath;
let ignored = (request.ignored || []).sort();
if (prevRequest && (paths.isEqualOrParent(basePath, prevRequest.basePath))) {
if (!isEqualIgnore(ignored, prevRequest.ignored)) {
result[prevRequest.basePath].push({ basePath, ignored });
}
} else {
prevRequest = { basePath, ignored };
result[basePath] = [prevRequest];
}
}
return result;
}
function isEqualRequests(r1: IWatcherRequest[], r2: IWatcherRequest[]) {
if (r1.length !== r2.length) {
return false;
}
for (let k = 0; k < r1.length; k++) {
if (r1[k].basePath !== r2[k].basePath || !isEqualIgnore(r1[k].ignored, r2[k].ignored)) {
return false;
}
}
return true;
}
function isEqualIgnore(i1: string[], i2: string[]) {
if (i1.length !== i2.length) {
return false;
}
for (let k = 0; k < i1.length; k++) {
if (i1[k] !== i2[k]) {
return false;
}
}
return true;
}

fm_demod.py
#
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
from gnuradio.blks2impl.fm_emph import fm_deemph
from math import pi
class fm_demod_cf(gr.hier_block2):
"""
Generalized FM demodulation block with deemphasis and audio
filtering.
This block demodulates a band-limited, complex down-converted FM
    channel into the original baseband signal, optionally applying
deemphasis. Low pass filtering is done on the resultant signal. It
    produces an output float stream in the range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
    @type channel_rate: integer
@param deviation: maximum FM deviation (default = 5000)
@type deviation: float
@param audio_decim: input to output decimation rate
@type audio_decim: integer
@param audio_pass: audio low pass filter passband frequency
@type audio_pass: float
@param audio_stop: audio low pass filter stop frequency
@type audio_stop: float
@param gain: gain applied to audio output (default = 1.0)
@type gain: float
@param tau: deemphasis time constant (default = 75e-6), specify 'None'
to prevent deemphasis
"""
def __init__(self, channel_rate, audio_decim, deviation,
audio_pass, audio_stop, gain=1.0, tau=75e-6):
gr.hier_block2.__init__(self, "fm_demod_cf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
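        # gr.quadrature_demod_cf outputs the per-sample phase difference, so this
        # gain maps a +/-deviation Hz frequency swing onto roughly [-1.0, +1.0].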
k = channel_rate/(2*pi*deviation)
QUAD = gr.quadrature_demod_cf(k)
audio_taps = optfir.low_pass(gain, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60) # Stopband attenuation
LPF = gr.fir_filter_fff(audio_decim, audio_taps)
if tau is not None:
DEEMPH = fm_deemph(channel_rate, tau)
self.connect(self, QUAD, DEEMPH, LPF, self)
else:
self.connect(self, QUAD, LPF, self)
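
# Minimal usage sketch (assumes an existing gr.top_block `tb`, a complex
# baseband source `src` at 250 kS/s and an audio sink `snk`; the names and
# rates here are illustrative, not part of this module):
#
#   demod = fm_demod_cf(channel_rate=250000, audio_decim=8, deviation=75000,
#                       audio_pass=15000, audio_stop=16000)
#   tb.connect(src, demod, snk)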
class demod_20k0f3e_cf(fm_demod_cf):
"""
NBFM demodulation block, 20 KHz channels
This block demodulates a complex, downconverted, narrowband FM
channel conforming to 20K0F3E emission standards, outputting
floats in the range [-1.0, +1.0].
    @param channel_rate: incoming sample rate of the FM baseband
    @type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Deviation
3000, # Audio passband frequency
4500) # Audio stopband frequency
class demod_200kf3e_cf(fm_demod_cf):
"""
WFM demodulation block, mono.
This block demodulates a complex, downconverted, wideband FM
channel conforming to 200KF3E emission standards, outputting
floats in the range [-1.0, +1.0].
    @param channel_rate: incoming sample rate of the FM baseband
    @type channel_rate: integer
    @param audio_decim: input to output decimation rate
    @type audio_decim: integer
    """
    def __init__(self, channel_rate, audio_decim):
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             75000,  # Deviation
                             15000,  # Audio passband
                             16000,  # Audio stopband
                             20.0)   # Audio gain
fm_demod.py | #
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
from gnuradio.blks2impl.fm_emph import fm_deemph
from math import pi
class fm_demod_cf(gr.hier_block2):
"""
Generalized FM demodulation block with deemphasis and audio
filtering.
This block demodulates a band-limited, complex down-converted FM
channel into the the original baseband signal, optionally applying
deemphasis. Low pass filtering is done on the resultant signal. It
produces an output float strem in the range of [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type sample_rate: integer
@param deviation: maximum FM deviation (default = 5000)
@type deviation: float
@param audio_decim: input to output decimation rate
@type audio_decim: integer
@param audio_pass: audio low pass filter passband frequency
@type audio_pass: float
@param audio_stop: audio low pass filter stop frequency
@type audio_stop: float
@param gain: gain applied to audio output (default = 1.0)
@type gain: float
@param tau: deemphasis time constant (default = 75e-6), specify 'None'
to prevent deemphasis
"""
def __init__(self, channel_rate, audio_decim, deviation,
audio_pass, audio_stop, gain=1.0, tau=75e-6):
gr.hier_block2.__init__(self, "fm_demod_cf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
k = channel_rate/(2*pi*deviation)
QUAD = gr.quadrature_demod_cf(k)
audio_taps = optfir.low_pass(gain, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60) # Stopband attenuation
LPF = gr.fir_filter_fff(audio_decim, audio_taps)
if tau is not None:
DEEMPH = fm_deemph(channel_rate, tau)
self.connect(self, QUAD, DEEMPH, LPF, self)
else:
self.connect(self, QUAD, LPF, self)
class demod_20k0f3e_cf(fm_demod_cf):
"""
NBFM demodulation block, 20 KHz channels
This block demodulates a complex, downconverted, narrowband FM
channel conforming to 20K0F3E emission standards, outputting
floats in the range [-1.0, +1.0].
@param sample_rate: incoming sample rate of the FM baseband
@type sample_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Deviation
3000, # Audio passband frequency
4500) # Audio stopband frequency
class | (fm_demod_cf):
"""
WFM demodulation block, mono.
This block demodulates a complex, downconverted, wideband FM
channel conforming to 200KF3E emission standards, outputting
floats in the range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
75000, # Deviation
15000, # Audio passband
16000, # Audio stopband
20.0) # Audio gain
| demod_200kf3e_cf | identifier_name |
fm_demod.py | #
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
from gnuradio.blks2impl.fm_emph import fm_deemph
from math import pi
class fm_demod_cf(gr.hier_block2):
"""
Generalized FM demodulation block with deemphasis and audio
filtering.
This block demodulates a band-limited, complex down-converted FM
channel into the original baseband signal, optionally applying
deemphasis. Low pass filtering is done on the resultant signal. It
produces an output float stream in the range of [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param deviation: maximum FM deviation (default = 5000)
@type deviation: float
@param audio_decim: input to output decimation rate
@type audio_decim: integer
@param audio_pass: audio low pass filter passband frequency
@type audio_pass: float
@param audio_stop: audio low pass filter stop frequency
@type audio_stop: float
@param gain: gain applied to audio output (default = 1.0)
@type gain: float
@param tau: deemphasis time constant (default = 75e-6), specify 'None'
to prevent deemphasis
"""
def __init__(self, channel_rate, audio_decim, deviation,
audio_pass, audio_stop, gain=1.0, tau=75e-6):
gr.hier_block2.__init__(self, "fm_demod_cf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
k = channel_rate/(2*pi*deviation)
QUAD = gr.quadrature_demod_cf(k)
audio_taps = optfir.low_pass(gain, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60) # Stopband attenuation
LPF = gr.fir_filter_fff(audio_decim, audio_taps)
if tau is not None:
DEEMPH = fm_deemph(channel_rate, tau)
self.connect(self, QUAD, DEEMPH, LPF, self)
else:
self.connect(self, QUAD, LPF, self)
class demod_20k0f3e_cf(fm_demod_cf):
|
class demod_200kf3e_cf(fm_demod_cf):
"""
WFM demodulation block, mono.
This block demodulates a complex, downconverted, wideband FM
channel conforming to 200KF3E emission standards, outputting
floats in the range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
75000, # Deviation
15000, # Audio passband
16000, # Audio stopband
20.0) # Audio gain
| """
NBFM demodulation block, 20 kHz channels.
This block demodulates a complex, downconverted, narrowband FM
channel conforming to 20K0F3E emission standards, outputting
floats in the range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Deviation
3000, # Audio passband frequency
4500) # Audio stopband frequency | identifier_body |
fm_demod.py | #
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
from gnuradio.blks2impl.fm_emph import fm_deemph
from math import pi
class fm_demod_cf(gr.hier_block2):
"""
Generalized FM demodulation block with deemphasis and audio
filtering.
This block demodulates a band-limited, complex down-converted FM
channel into the original baseband signal, optionally applying
deemphasis. Low pass filtering is done on the resultant signal. It
produces an output float stream in the range of [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param deviation: maximum FM deviation (default = 5000)
@type deviation: float
@param audio_decim: input to output decimation rate
@type audio_decim: integer
@param audio_pass: audio low pass filter passband frequency
@type audio_pass: float
@param audio_stop: audio low pass filter stop frequency
@type audio_stop: float
@param gain: gain applied to audio output (default = 1.0)
@type gain: float
@param tau: deemphasis time constant (default = 75e-6), specify 'None'
to prevent deemphasis
"""
def __init__(self, channel_rate, audio_decim, deviation,
audio_pass, audio_stop, gain=1.0, tau=75e-6):
gr.hier_block2.__init__(self, "fm_demod_cf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
k = channel_rate/(2*pi*deviation)
QUAD = gr.quadrature_demod_cf(k)
audio_taps = optfir.low_pass(gain, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60) # Stopband attenuation
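# Decimating FIR filter: band-limits the demodulated audio and drops the sample rate by audio_decim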
LPF = gr.fir_filter_fff(audio_decim, audio_taps)
if tau is not None:
|
else:
self.connect(self, QUAD, LPF, self)
class demod_20k0f3e_cf(fm_demod_cf):
"""
NBFM demodulation block, 20 kHz channels.
This block demodulates a complex, downconverted, narrowband FM
channel conforming to 20K0F3E emission standards, outputting
floats in the range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Deviation
3000, # Audio passband frequency
4500) # Audio stopband frequency
class demod_200kf3e_cf(fm_demod_cf):
"""
WFM demodulation block, mono.
This block demodulates a complex, downconverted, wideband FM
channel conforming to 200KF3E emission standards, outputting
floats in the range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the FM baseband
@type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
75000, # Deviation
15000, # Audio passband
16000, # Audio stopband
20.0) # Audio gain
| DEEMPH = fm_deemph(channel_rate, tau)
self.connect(self, QUAD, DEEMPH, LPF, self) | conditional_block |
statuses.ts | export const BattleStatuses: {[k: string]: ModdedPureEffectData} = {
brn: {
inherit: true,
onResidual(pokemon) {
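// In this mod, burn deals 1/8 of base max HP as residual damage each turn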
this.damage(pokemon.baseMaxhp / 8);
},
},
par: {
inherit: true,
onModifySpe(spe, pokemon) {
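// Paralysis quarters Speed unless the Pokemon has Quick Feet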
if (!pokemon.hasAbility('quickfeet')) {
return this.chainModify(0.25);
}
},
},
confusion: {
inherit: true,
onBeforeMove(pokemon) {
pokemon.volatiles.confusion.time--;
if (!pokemon.volatiles.confusion.time) {
pokemon.removeVolatile('confusion');
return;
}
this.add('-activate', pokemon, 'confusion');
if (this.randomChance(1, 2)) { | }
const damage = this.getDamage(pokemon, pokemon, 40);
if (typeof damage !== 'number') throw new Error("Confusion damage not dealt");
this.damage(damage, pokemon, pokemon, {
id: 'confused',
effectType: 'Move',
type: '???',
} as ActiveMove);
return false;
},
},
choicelock: {
inherit: true,
onBeforeMove() {},
},
}; | return; | random_line_split |
statuses.ts | export const BattleStatuses: {[k: string]: ModdedPureEffectData} = {
brn: {
inherit: true,
| (pokemon) {
this.damage(pokemon.baseMaxhp / 8);
},
},
par: {
inherit: true,
onModifySpe(spe, pokemon) {
if (!pokemon.hasAbility('quickfeet')) {
return this.chainModify(0.25);
}
},
},
confusion: {
inherit: true,
onBeforeMove(pokemon) {
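// Count down the confusion turns, then roll 50/50: act normally or hit itself for 40 base power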
pokemon.volatiles.confusion.time--;
if (!pokemon.volatiles.confusion.time) {
pokemon.removeVolatile('confusion');
return;
}
this.add('-activate', pokemon, 'confusion');
if (this.randomChance(1, 2)) {
return;
}
const damage = this.getDamage(pokemon, pokemon, 40);
if (typeof damage !== 'number') throw new Error("Confusion damage not dealt");
this.damage(damage, pokemon, pokemon, {
id: 'confused',
effectType: 'Move',
type: '???',
} as ActiveMove);
return false;
},
},
choicelock: {
inherit: true,
onBeforeMove() {},
},
};
| onResidual | identifier_name |
statuses.ts | export const BattleStatuses: {[k: string]: ModdedPureEffectData} = {
brn: {
inherit: true,
onResidual(pokemon) {
this.damage(pokemon.baseMaxhp / 8);
},
},
par: {
inherit: true,
onModifySpe(spe, pokemon) {
if (!pokemon.hasAbility('quickfeet')) {
return this.chainModify(0.25);
}
},
},
confusion: {
inherit: true,
onBeforeMove(pokemon) {
pokemon.volatiles.confusion.time--;
if (!pokemon.volatiles.confusion.time) {
pokemon.removeVolatile('confusion');
return;
}
this.add('-activate', pokemon, 'confusion');
if (this.randomChance(1, 2)) {
return;
}
const damage = this.getDamage(pokemon, pokemon, 40);
if (typeof damage !== 'number') throw new Error("Confusion damage not dealt");
this.damage(damage, pokemon, pokemon, {
id: 'confused',
effectType: 'Move',
type: '???',
} as ActiveMove);
return false;
},
},
choicelock: {
inherit: true,
onBeforeMove() | ,
},
};
| {} | identifier_body |
main.rs | extern crate feroxide;
use feroxide::data_atoms::*;
use feroxide::data_molecules::*;
use feroxide::data_sef::*;
use feroxide::data_sep::*;
use feroxide::*;
fn main() | {
// You can create digital molecules with ease in two ways:
// ... the easy way
let carbondioxide = Molecule::from_string("CO2").unwrap();
// ... and the fast way
let carbonmonoxide = Molecule {
compounds: vec![
MoleculeCompound {
atom: CARBON,
amount: 1,
},
MoleculeCompound {
atom: OXYGEN,
amount: 1,
},
],
};
// Of which you can generate the name
let _name = carbondioxide.name();
// ... or the symbol
let symbol = carbondioxide.symbol();
// You can calculate the mass per mole
let mass_per_mole = carbondioxide.mass();
// Multiply that with your amount of moles
let weight = mass_per_mole * 10.0;
// To get your data
println!("10 moles of {} weigh {} gram(s).", symbol, weight);
// Throw a bunch of molecules together in a container with a bit of energy
let mut container = Container {
contents: vec![
ContainerCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_molecule!(WATER.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_atom!(OXYGEN.clone()),
moles: Moles::from(10000000000.0),
},
],
available_energy: Energy::from(100_000f64), // in Joules
};
// Specify the reaction that will occur
// H₂O + CO₂ ⇌ H₂CO₃
let reaction = ElemReaction {
lhs: ReactionSide {
compounds: vec![
ReactionCompound {
element: ion_from_atom!(OXYGEN.clone()),
amount: 1,
},
ReactionCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
amount: 2,
},
],
},
rhs: ReactionSide {
compounds: vec![ReactionCompound {
element: ion_from_molecule!(carbondioxide.clone()),
amount: 2,
}],
},
is_equilibrium: true,
};
// Make sure the reaction is valid
assert!(reaction.equalise());
assert!(reaction.is_valid());
// Print the reaction in names
println!("{}", reaction.name());
// ... or in symbols (the default)
println!("{}", reaction.symbol());
// Print the contents of the container at the start
println!("Contents: {}", container);
// Run the reaction 10 times
for i in 0..10 {
// Run the reaction on the container
container.react(&reaction);
// Show what's left
println!("[{:>2}] Contents: {}", i + 1, container.to_string());
}
// Redox reactions are also possible
let redox = RedoxReaction {
oxidator: ElemReaction::<Ion>::ion_from_string("F2 + 2e <> 2F;1-").unwrap(),
reductor: ElemReaction::<Ion>::ion_from_string("Fe <> Fe;3 + 3e").unwrap(),
};
// Make sure it's valid
assert!(redox.equalise());
assert!(redox.is_valid());
// Print the symbol representation
println!("{}", redox.symbol());
// Print the SEP values
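// (SEP: standard electrode potential of each half-reaction)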
println!("oxidator: {}", get_sep(&redox.oxidator).unwrap());
println!("reductor: {}", get_sep(&redox.reductor).unwrap());
// Print the SEF value
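// (SEF: standard enthalpy of formation, in kJ/mol)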
println!(
"SEF(AlCl3) = {} kJ/mol",
get_sef(&ion_from_string!("AlCl3")).unwrap()
);
// Boom
println!("\n\n\n");
let mut water_container =
Container::<Ion>::ion_from_string("2000 H2; + 1000 O2; [10000 J]").unwrap();
println!("pre: {}", water_container);
let redox_boom = get_redox_reaction(&water_container).unwrap();
println!("reaction: {}", redox_boom.elem_reaction().symbol());
for _ in 0..100 {
water_container.react(&redox_boom);
}
println!("post: {}", water_container);
println!("\n\n\n");
// Automatic redox reactions
println!("\n\n\n");
// Get the possible redox reactions from a container
let mut redox_container = Container {
contents: vec![
ContainerCompound {
element: ion_from_string!("Fe"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("O2"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("H2O"),
moles: Moles::from(200.0),
},
],
available_energy: Energy::from(100_000f64),
};
let redox_reaction = get_redox_reaction(&redox_container);
if let Some(redox) = redox_reaction {
println!("\n\n");
println!("Container: {}", redox_container);
println!("\tcan have the following reaction:");
println!("Redox reaction: \n{}", redox.symbol());
println!("Total reaction: {}", redox.elem_reaction().symbol());
for _ in 0..100 {
redox_container.react(&redox);
}
println!("\n");
println!("After 100 times:");
println!("Container: {}", redox_container);
let rust = ElemReaction::<Ion>::ion_from_string("Fe;2+ + 2OH;- > FeO2H2;0").unwrap();
println!("\n");
println!("Container: {}", &redox_container);
println!("\tcan have the following reaction:");
println!("Salt reaction: \n{}", rust.symbol());
let fe2 = ContainerCompound::<Ion>::ion_from_string("Fe;2+").unwrap();
while redox_container.contains(&fe2) {
redox_container.react(&rust);
}
println!("\n");
println!("After all {} is gone:", fe2.symbol());
println!("Container: {}", redox_container);
println!("\n\n\n");
}
}
| identifier_body |
|
main.rs | extern crate feroxide;
use feroxide::data_atoms::*;
use feroxide::data_molecules::*;
use feroxide::data_sef::*;
use feroxide::data_sep::*;
use feroxide::*;
fn | () {
// You can create digital molecules with ease in two ways:
// ... the easy way
let carbondioxide = Molecule::from_string("CO2").unwrap();
// ... and the fast way
let carbonmonoxide = Molecule {
compounds: vec![
MoleculeCompound {
atom: CARBON,
amount: 1,
},
MoleculeCompound {
atom: OXYGEN,
amount: 1,
},
],
};
// Of which you can generate the name
let _name = carbondioxide.name();
// ... or the symbol
let symbol = carbondioxide.symbol();
// You can calculate the mass per mole
let mass_per_mole = carbondioxide.mass();
// Multiply that with your amount of moles
let weight = mass_per_mole * 10.0;
// To get your data
println!("10 moles of {} weigh {} gram(s).", symbol, weight);
// Throw a bunch of molecules together in a container with a bit of energy
let mut container = Container {
contents: vec![
ContainerCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_molecule!(WATER.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_atom!(OXYGEN.clone()),
moles: Moles::from(10000000000.0),
},
],
available_energy: Energy::from(100_000f64), // in Joules
};
// Specify the reaction that will occur
// H₂O + CO₂ ⇌ H₂CO₃
let reaction = ElemReaction {
lhs: ReactionSide {
compounds: vec![
ReactionCompound {
element: ion_from_atom!(OXYGEN.clone()),
amount: 1,
},
ReactionCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
amount: 2,
},
],
},
rhs: ReactionSide {
compounds: vec![ReactionCompound {
element: ion_from_molecule!(carbondioxide.clone()),
amount: 2,
}],
},
is_equilibrium: true,
};
// Make sure the reaction is valid
assert!(reaction.equalise());
assert!(reaction.is_valid());
// Print the reaction in names
println!("{}", reaction.name());
// ... or in symbols (the default)
println!("{}", reaction.symbol());
// Print the contents of the container at the start
println!("Contents: {}", container);
// Run the reaction 10 times
for i in 0..10 {
// Run the reaction on the container
container.react(&reaction);
// Show what's left
println!("[{:>2}] Contents: {}", i + 1, container.to_string());
}
// Redox reactions are also possible
let redox = RedoxReaction {
oxidator: ElemReaction::<Ion>::ion_from_string("F2 + 2e <> 2F;1-").unwrap(),
reductor: ElemReaction::<Ion>::ion_from_string("Fe <> Fe;3 + 3e").unwrap(),
};
// Make sure it's valid
assert!(redox.equalise());
assert!(redox.is_valid());
// Print the symbol representation
println!("{}", redox.symbol());
// Print the SEP values
println!("oxidator: {}", get_sep(&redox.oxidator).unwrap());
println!("reductor: {}", get_sep(&redox.reductor).unwrap());
// Print the SEF value
println!(
"SEF(AlCl3) = {} kJ/mol",
get_sef(&ion_from_string!("AlCl3")).unwrap()
);
// Boom
println!("\n\n\n");
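// Container strings appear to follow "<moles> <formula>; + ... [<energy> J]"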
let mut water_container =
Container::<Ion>::ion_from_string("2000 H2; + 1000 O2; [10000 J]").unwrap();
println!("pre: {}", water_container);
let redox_boom = get_redox_reaction(&water_container).unwrap();
println!("reaction: {}", redox_boom.elem_reaction().symbol());
for _ in 0..100 {
water_container.react(&redox_boom);
}
println!("post: {}", water_container);
println!("\n\n\n");
// Automatic redox reactions
println!("\n\n\n");
// Get the possible redox reactions from a container
let mut redox_container = Container {
contents: vec![
ContainerCompound {
element: ion_from_string!("Fe"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("O2"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("H2O"),
moles: Moles::from(200.0),
},
],
available_energy: Energy::from(100_000f64),
};
let redox_reaction = get_redox_reaction(&redox_container);
if let Some(redox) = redox_reaction {
println!("\n\n");
println!("Container: {}", redox_container);
println!("\tcan have the following reaction:");
println!("Redox reaction: \n{}", redox.symbol());
println!("Total reaction: {}", redox.elem_reaction().symbol());
for _ in 0..100 {
redox_container.react(&redox);
}
println!("\n");
println!("After 100 times:");
println!("Container: {}", redox_container);
let rust = ElemReaction::<Ion>::ion_from_string("Fe;2+ + 2OH;- > FeO2H2;0").unwrap();
println!("\n");
println!("Container: {}", &redox_container);
println!("\tcan have the following reaction:");
println!("Salt reaction: \n{}", rust.symbol());
let fe2 = ContainerCompound::<Ion>::ion_from_string("Fe;2+").unwrap();
while redox_container.contains(&fe2) {
redox_container.react(&rust);
}
println!("\n");
println!("After all {} is gone:", fe2.symbol());
println!("Container: {}", redox_container);
println!("\n\n\n");
}
}
| main | identifier_name |
main.rs | extern crate feroxide;
use feroxide::data_atoms::*;
use feroxide::data_molecules::*;
use feroxide::data_sef::*;
use feroxide::data_sep::*;
use feroxide::*;
fn main() {
// You can create digital molecules with ease in two ways:
// ... the easy way
let carbondioxide = Molecule::from_string("CO2").unwrap();
// ... and the fast way
let carbonmonoxide = Molecule {
compounds: vec![
MoleculeCompound {
atom: CARBON,
amount: 1,
},
MoleculeCompound {
atom: OXYGEN,
amount: 1,
},
],
};
// Of which you can generate the name
let _name = carbondioxide.name();
// ... or the symbol
let symbol = carbondioxide.symbol();
// You can calculate the mass per mole
let mass_per_mole = carbondioxide.mass();
// Multiply that with your amount of moles
let weight = mass_per_mole * 10.0;
// To get your data
println!("10 moles of {} weigh {} gram(s).", symbol, weight);
// Throw a bunch of molecules together in a container with a bit of energy
let mut container = Container {
contents: vec![
ContainerCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_molecule!(WATER.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_atom!(OXYGEN.clone()),
moles: Moles::from(10000000000.0),
},
],
available_energy: Energy::from(100_000f64), // in Joules
};
// Specify the reaction that will occur
// H₂O + CO₂ ⇌ H₂CO₃
let reaction = ElemReaction {
lhs: ReactionSide {
compounds: vec![
ReactionCompound {
element: ion_from_atom!(OXYGEN.clone()),
amount: 1,
},
ReactionCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
amount: 2,
},
],
},
rhs: ReactionSide {
compounds: vec![ReactionCompound {
element: ion_from_molecule!(carbondioxide.clone()),
amount: 2,
}],
},
is_equilibrium: true,
};
// Make sure the reaction is valid
assert!(reaction.equalise());
assert!(reaction.is_valid());
// Print the reaction in names
println!("{}", reaction.name());
// ... or in symbols (the default)
println!("{}", reaction.symbol());
// Print the contents of the container at the start
println!("Contents: {}", container);
// Run the reaction 10 times
for i in 0..10 {
// Run the reaction on the container
container.react(&reaction);
// Show what's left
println!("[{:>2}] Contents: {}", i + 1, container.to_string());
}
// Redox reactions are also possible
let redox = RedoxReaction {
oxidator: ElemReaction::<Ion>::ion_from_string("F2 + 2e <> 2F;1-").unwrap(),
reductor: ElemReaction::<Ion>::ion_from_string("Fe <> Fe;3 + 3e").unwrap(),
};
// Make sure it's valid
assert!(redox.equalise());
assert!(redox.is_valid());
// Print the symbol representation
println!("{}", redox.symbol());
// Print the SEP values
println!("oxidator: {}", get_sep(&redox.oxidator).unwrap());
println!("reductor: {}", get_sep(&redox.reductor).unwrap());
// Print the SEF value
println!(
"SEF(AlCl3) = {} kJ/mol",
get_sef(&ion_from_string!("AlCl3")).unwrap()
);
// Boom
println!("\n\n\n");
let mut water_container =
Container::<Ion>::ion_from_string("2000 H2; + 1000 O2; [10000 J]").unwrap();
println!("pre: {}", water_container);
let redox_boom = get_redox_reaction(&water_container).unwrap();
println!("reaction: {}", redox_boom.elem_reaction().symbol());
for _ in 0..100 {
water_container.react(&redox_boom);
}
println!("post: {}", water_container);
println!("\n\n\n");
// Automatic redox reactions
println!("\n\n\n");
// Get the possible redox reactions from a container
let mut redox_container = Container {
contents: vec![
ContainerCompound {
element: ion_from_string!("Fe"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("O2"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("H2O"),
moles: Moles::from(200.0),
},
],
available_energy: Energy::from(100_000f64),
};
let redox_reaction = get_redox_reaction(&redox_container);
if let Some(redox) = redox_reaction {
| println!("\n\n");
println!("Container: {}", redox_container);
println!("\tcan have the following reaction:");
println!("Redox reaction: \n{}", redox.symbol());
println!("Total reaction: {}", redox.elem_reaction().symbol());
for _ in 0..100 {
redox_container.react(&redox);
}
println!("\n");
println!("After 100 times:");
println!("Container: {}", redox_container);
let rust = ElemReaction::<Ion>::ion_from_string("Fe;2+ + 2OH;- > FeO2H2;0").unwrap();
println!("\n");
println!("Container: {}", &redox_container);
println!("\tcan have the following reaction:");
println!("Salt reaction: \n{}", rust.symbol());
let fe2 = ContainerCompound::<Ion>::ion_from_string("Fe;2+").unwrap();
while redox_container.contains(&fe2) {
redox_container.react(&rust);
}
println!("\n");
println!("After all {} is gone:", fe2.symbol());
println!("Container: {}", redox_container);
println!("\n\n\n");
}
}
| conditional_block |
|
main.rs | extern crate feroxide;
use feroxide::data_atoms::*;
use feroxide::data_molecules::*;
use feroxide::data_sef::*;
use feroxide::data_sep::*;
use feroxide::*;
fn main() {
// You can create digital molecules with ease in two ways:
// ... the easy way
let carbondioxide = Molecule::from_string("CO2").unwrap();
// ... and the fast way
let carbonmonoxide = Molecule {
compounds: vec![
MoleculeCompound {
atom: CARBON,
amount: 1,
},
MoleculeCompound {
atom: OXYGEN,
amount: 1,
},
],
};
// Of which you can generate the name
let _name = carbondioxide.name();
// ... or the symbol
let symbol = carbondioxide.symbol();
// You can calculate the mass per mole
let mass_per_mole = carbondioxide.mass();
// Multiply that with your amount of moles
let weight = mass_per_mole * 10.0;
// To get your data
println!("10 moles of {} weigh {} gram(s).", symbol, weight);
// Throw a bunch of molecules together in a container with a bit of energy
let mut container = Container {
contents: vec![
ContainerCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
moles: Moles::from(10000000000.0),
},
ContainerCompound {
element: ion_from_molecule!(WATER.clone()),
moles: Moles::from(10000000000.0),
}, | element: ion_from_atom!(OXYGEN.clone()),
moles: Moles::from(10000000000.0),
},
],
available_energy: Energy::from(100_000f64), // in Joules
};
// Specify the reaction that will occur
// H₂O + CO₂ ⇌ H₂CO₃
let reaction = ElemReaction {
lhs: ReactionSide {
compounds: vec![
ReactionCompound {
element: ion_from_atom!(OXYGEN.clone()),
amount: 1,
},
ReactionCompound {
element: ion_from_molecule!(carbonmonoxide.clone()),
amount: 2,
},
],
},
rhs: ReactionSide {
compounds: vec![ReactionCompound {
element: ion_from_molecule!(carbondioxide.clone()),
amount: 2,
}],
},
is_equilibrium: true,
};
// Make sure the reaction is valid
assert!(reaction.equalise());
assert!(reaction.is_valid());
// Print the reaction in names
println!("{}", reaction.name());
// ... or in symbols (the default)
println!("{}", reaction.symbol());
// Print the contents of the container at the start
println!("Contents: {}", container);
// Run the reaction 10 times
for i in 0..10 {
// Run the reaction on the container
container.react(&reaction);
// Show what's left
println!("[{:>2}] Contents: {}", i + 1, container.to_string());
}
// Redox reactions are also possible
let redox = RedoxReaction {
oxidator: ElemReaction::<Ion>::ion_from_string("F2 + 2e <> 2F;1-").unwrap(),
reductor: ElemReaction::<Ion>::ion_from_string("Fe <> Fe;3 + 3e").unwrap(),
};
// Make sure it's valid
assert!(redox.equalise());
assert!(redox.is_valid());
// Print the symbol representation
println!("{}", redox.symbol());
// Print the SEP values
println!("oxidator: {}", get_sep(&redox.oxidator).unwrap());
println!("reductor: {}", get_sep(&redox.reductor).unwrap());
// Print the SEF value
println!(
"SEF(AlCl3) = {} kJ/mol",
get_sef(&ion_from_string!("AlCl3")).unwrap()
);
// Boom
println!("\n\n\n");
let mut water_container =
Container::<Ion>::ion_from_string("2000 H2; + 1000 O2; [10000 J]").unwrap();
println!("pre: {}", water_container);
let redox_boom = get_redox_reaction(&water_container).unwrap();
println!("reaction: {}", redox_boom.elem_reaction().symbol());
for _ in 0..100 {
water_container.react(&redox_boom);
}
println!("post: {}", water_container);
println!("\n\n\n");
// Automatic redox reactions
println!("\n\n\n");
// Get the possible redox reactions from a container
let mut redox_container = Container {
contents: vec![
ContainerCompound {
element: ion_from_string!("Fe"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("O2"),
moles: Moles::from(100.0),
},
ContainerCompound {
element: ion_from_string!("H2O"),
moles: Moles::from(200.0),
},
],
available_energy: Energy::from(100_000f64),
};
let redox_reaction = get_redox_reaction(&redox_container);
if let Some(redox) = redox_reaction {
println!("\n\n");
println!("Container: {}", redox_container);
println!("\tcan have the following reaction:");
println!("Redox reaction: \n{}", redox.symbol());
println!("Total reaction: {}", redox.elem_reaction().symbol());
for _ in 0..100 {
redox_container.react(&redox);
}
println!("\n");
println!("After 100 times:");
println!("Container: {}", redox_container);
let rust = ElemReaction::<Ion>::ion_from_string("Fe;2+ + 2OH;- > FeO2H2;0").unwrap();
println!("\n");
println!("Container: {}", &redox_container);
println!("\tcan have the following reaction:");
println!("Salt reaction: \n{}", rust.symbol());
let fe2 = ContainerCompound::<Ion>::ion_from_string("Fe;2+").unwrap();
while redox_container.contains(&fe2) {
redox_container.react(&rust);
}
println!("\n");
println!("After all {} is gone:", fe2.symbol());
println!("Container: {}", redox_container);
println!("\n\n\n");
}
} | ContainerCompound { | random_line_split |
entry.js | /*
Copyright 2016 Paul Bevis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React from 'react';
import {render} from 'react-dom';
import {Provider} from 'react-redux';
import Spelling from './js/containers/spelling';
import {createStore} from 'redux';
import spellingAppReducers from './js/reducers/spelling';
import injectTapEventPlugin from 'react-tap-event-plugin';
injectTapEventPlugin();
const store = createStore(spellingAppReducers);
render(
<Provider store={store}>
<Spelling />
</Provider>,
document.getElementById('framework')
);
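// Polyfill Object.assign (ES2015) for older environments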
if (typeof Object.assign != 'function') {
(function() {
Object.assign = function(target) {
'use strict';
if (target === undefined || target === null) {
throw new TypeError('Cannot convert undefined or null to object');
}
var output = Object(target);
for (var index = 1; index < arguments.length; index++) {
var source = arguments[index];
if (source !== undefined && source !== null) {
for (var nextKey in source) {
if (source.hasOwnProperty(nextKey)) {
output[nextKey] = source[nextKey];
}
}
}
}
return output;
};
})();
}
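// Polyfill Array.prototype.find (ES2015) for older environments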
if (!Array.prototype.find) {
Array.prototype.find = function(predicate) {
if (this === null) {
throw new TypeError('Array.prototype.find called on null or undefined');
}
if (typeof predicate !== 'function') {
throw new TypeError('predicate must be a function');
}
var list = Object(this);
var length = list.length >>> 0;
var thisArg = arguments[1];
var value;
for (var i = 0; i < length; i++) {
value = list[i];
if (predicate.call(thisArg, value, i, list)) |
}
return undefined;
};
}
| {
return value;
} | conditional_block |
entry.js | /*
Copyright 2016 Paul Bevis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software | */
import React from 'react';
import {render} from 'react-dom';
import {Provider} from 'react-redux';
import Spelling from './js/containers/spelling';
import {createStore} from 'redux';
import spellingAppReducers from './js/reducers/spelling';
import injectTapEventPlugin from 'react-tap-event-plugin';
injectTapEventPlugin();
const store = createStore(spellingAppReducers);
render(
<Provider store={store}>
<Spelling />
</Provider>,
document.getElementById('framework')
);
if (typeof Object.assign != 'function') {
(function() {
Object.assign = function(target) {
'use strict';
if (target === undefined || target === null) {
throw new TypeError('Cannot convert undefined or null to object');
}
var output = Object(target);
for (var index = 1; index < arguments.length; index++) {
var source = arguments[index];
if (source !== undefined && source !== null) {
for (var nextKey in source) {
if (source.hasOwnProperty(nextKey)) {
output[nextKey] = source[nextKey];
}
}
}
}
return output;
};
})();
}
if (!Array.prototype.find) {
Array.prototype.find = function(predicate) {
if (this === null) {
throw new TypeError('Array.prototype.find called on null or undefined');
}
if (typeof predicate !== 'function') {
throw new TypeError('predicate must be a function');
}
var list = Object(this);
var length = list.length >>> 0;
var thisArg = arguments[1];
var value;
for (var i = 0; i < length; i++) {
value = list[i];
if (predicate.call(thisArg, value, i, list)) {
return value;
}
}
return undefined;
};
} | distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. | random_line_split |
11-tensormrtrix.py | # -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iterations for the tensor estimation
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
self.__produceMetrics(tensorsMrtrix, mask, dwi)
# convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
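# Derives FA, ADC and AD (first eigenvalue) plus the principal eigenvector; l2/l3 feed the RD and MD maps below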
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
|
self.launchCommand(cmd)
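# RD: mean of the second and third eigenvalues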
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
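# MD: mean of all three eigenvalues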
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def isIgnore(self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages
| cmd += "-mask {} ".format(mask) | conditional_block |
11-tensormrtrix.py | # -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iterations for the tensor estimation
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
self.__produceMetrics(tensorsMrtrix, mask, dwi)
# convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
cmd += "-mask {} ".format(mask)
self.launchCommand(cmd)
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def isIgnore(self):
|
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages
| return self.get("ignore") | identifier_body |
11-tensormrtrix.py | # -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask): |
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iterations for the tensor estimation
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
self.__produceMetrics(tensorsMrtrix, mask, dwi)
# convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
cmd += "-mask {} ".format(mask)
self.launchCommand(cmd)
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def isIgnore(self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages |
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa') | random_line_split |
11-tensormrtrix.py | # -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iterations for the tensor estimation
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
self.__produceMetrics(tensorsMrtrix, mask, dwi)
# convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
cmd += "-mask {} ".format(mask)
self.launchCommand(cmd)
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def | (self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages
| isIgnore | identifier_name |
series_test.py | import unittest
from series import slices
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class SeriesTest(unittest.TestCase):
def test_slices_of_one_from_one(self):
self.assertEqual(slices("1", 1), ["1"])
def test_slices_of_one_from_two(self):
self.assertEqual(slices("12", 1), ["1", "2"])
def test_slices_of_two(self):
self.assertEqual(slices("35", 2), ["35"])
def test_slices_of_two_overlap(self):
self.assertEqual(slices("9142", 2), ["91", "14", "42"])
def test_slices_can_include_duplicates(self):
self.assertEqual(slices("777777", 3), ["777", "777", "777", "777"])
def test_slices_of_a_long_series(self):
self.assertEqual(
slices("918493904243", 5),
["91849", "18493", "84939", "49390", "93904", "39042", "90424", "04243"],
)
def test_slice_length_is_too_large(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 6)
def test_slice_length_cannot_be_zero(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 0)
def test_slice_length_cannot_be_negative(self):
with self.assertRaisesWithMessage(ValueError):
slices("123", -1)
def test_empty_series_is_invalid(self):
with self.assertRaisesWithMessage(ValueError):
slices("", 1)
# Utility functions
def setUp(self):
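# Python 2 compatibility: fall back to the legacy assertRaisesRegexp name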
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == "__main__":
| unittest.main() | conditional_block |
|
series_test.py | import unittest
from series import slices
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class SeriesTest(unittest.TestCase):
def test_slices_of_one_from_one(self):
self.assertEqual(slices("1", 1), ["1"])
def test_slices_of_one_from_two(self):
self.assertEqual(slices("12", 1), ["1", "2"])
def test_slices_of_two(self):
self.assertEqual(slices("35", 2), ["35"])
def test_slices_of_two_overlap(self):
self.assertEqual(slices("9142", 2), ["91", "14", "42"])
def test_slices_can_include_duplicates(self):
self.assertEqual(slices("777777", 3), ["777", "777", "777", "777"])
def test_slices_of_a_long_series(self):
self.assertEqual(
slices("918493904243", 5),
["91849", "18493", "84939", "49390", "93904", "39042", "90424", "04243"],
)
def test_slice_length_is_too_large(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 6) | slices("12345", 0)
def test_slice_length_cannot_be_negative(self):
with self.assertRaisesWithMessage(ValueError):
slices("123", -1)
def test_empty_series_is_invalid(self):
with self.assertRaisesWithMessage(ValueError):
slices("", 1)
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == "__main__":
unittest.main() |
def test_slice_length_cannot_be_zero(self):
with self.assertRaisesWithMessage(ValueError): | random_line_split |
series_test.py | import unittest
from series import slices
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class SeriesTest(unittest.TestCase):
def test_slices_of_one_from_one(self):
self.assertEqual(slices("1", 1), ["1"])
def test_slices_of_one_from_two(self):
self.assertEqual(slices("12", 1), ["1", "2"])
def test_slices_of_two(self):
self.assertEqual(slices("35", 2), ["35"])
def test_slices_of_two_overlap(self):
self.assertEqual(slices("9142", 2), ["91", "14", "42"])
def test_slices_can_include_duplicates(self):
self.assertEqual(slices("777777", 3), ["777", "777", "777", "777"])
def test_slices_of_a_long_series(self):
self.assertEqual(
slices("918493904243", 5),
["91849", "18493", "84939", "49390", "93904", "39042", "90424", "04243"],
)
def test_slice_length_is_too_large(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 6)
def test_slice_length_cannot_be_zero(self):
|
def test_slice_length_cannot_be_negative(self):
with self.assertRaisesWithMessage(ValueError):
slices("123", -1)
def test_empty_series_is_invalid(self):
with self.assertRaisesWithMessage(ValueError):
slices("", 1)
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == "__main__":
unittest.main()
| with self.assertRaisesWithMessage(ValueError):
slices("12345", 0) | identifier_body |
series_test.py | import unittest
from series import slices
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class SeriesTest(unittest.TestCase):
def test_slices_of_one_from_one(self):
self.assertEqual(slices("1", 1), ["1"])
def test_slices_of_one_from_two(self):
self.assertEqual(slices("12", 1), ["1", "2"])
def test_slices_of_two(self):
self.assertEqual(slices("35", 2), ["35"])
def test_slices_of_two_overlap(self):
self.assertEqual(slices("9142", 2), ["91", "14", "42"])
def test_slices_can_include_duplicates(self):
self.assertEqual(slices("777777", 3), ["777", "777", "777", "777"])
def test_slices_of_a_long_series(self):
self.assertEqual(
slices("918493904243", 5),
["91849", "18493", "84939", "49390", "93904", "39042", "90424", "04243"],
)
def test_slice_length_is_too_large(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 6)
def test_slice_length_cannot_be_zero(self):
with self.assertRaisesWithMessage(ValueError):
slices("12345", 0)
def test_slice_length_cannot_be_negative(self):
with self.assertRaisesWithMessage(ValueError):
slices("123", -1)
def test_empty_series_is_invalid(self):
with self.assertRaisesWithMessage(ValueError):
slices("", 1)
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def | (self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == "__main__":
unittest.main()
| assertRaisesWithMessage | identifier_name |
read_file.rs | /*
* How to read a file.
* Future work: as a variant, we may use the C bindings to call mmap/munmap
*/
use std::io;
use std::result;
/* read the file path by calling the read_whole_file_str function */
fn | (path: ~str) -> ~str {
let res = io::read_whole_file_str(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
res.get()
}
/* read the file path line by line */
fn read_file_lines(path: ~str) -> ~str {
let res = io::file_reader(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
let mut content = ~"";
let reader = res.get();
loop {
let line = reader.read_line();
if reader.eof() {
break;
}
// read_line does not return the '\n', so we add it
content = content + line + "\n";
}
content
}
fn main() {
let filename = ~"read_file.rs";
//let content = read_file_whole(copy filename);
let content = read_file_lines(copy filename);
io::println("the content of " + filename + " is [\n" + content + "]");
}
| read_file_whole | identifier_name |
read_file.rs | /*
* How to read a file.
* Future work: as a variant, we may use the C bindings to call mmap/munmap
*/
use std::io;
use std::result;
/* read the file path by calling the read_whole_file_str function */
fn read_file_whole(path: ~str) -> ~str {
let res = io::read_whole_file_str(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
res.get()
}
/* read the file path line by line */
fn read_file_lines(path: ~str) -> ~str |
fn main() {
let filename = ~"read_file.rs";
//let content = read_file_whole(copy filename);
let content = read_file_lines(copy filename);
io::println("the content of " + filename + " is [\n" + content + "]");
}
| {
let res = io::file_reader(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
let mut content = ~"";
let reader = res.get();
loop {
let line = reader.read_line();
if reader.eof() {
break;
}
// read_line does not return the '\n', so we add it
content = content + line + "\n";
}
content
} | identifier_body |
read_file.rs | /*
* How to read a file. |
use std::io;
use std::result;
/* read the file path by calling the read_whole_file_str function */
fn read_file_whole(path: ~str) -> ~str {
let res = io::read_whole_file_str(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
res.get()
}
/* read the file path line by line */
fn read_file_lines(path: ~str) -> ~str {
let res = io::file_reader(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
let mut content = ~"";
let reader = res.get();
loop {
let line = reader.read_line();
if reader.eof() {
break;
}
// read_line does not return the '\n', so we add it
content = content + line + "\n";
}
content
}
fn main() {
let filename = ~"read_file.rs";
//let content = read_file_whole(copy filename);
let content = read_file_lines(copy filename);
io::println("the content of " + filename + " is [\n" + content + "]");
} | * Future work: as a variant, we may use the C bindings to call mmap/munmap
*/ | random_line_split |
read_file.rs | /*
* How to read a file.
* Future work: as a variant, we may use the C bindings to call mmap/munmap
*/
use std::io;
use std::result;
/* read the file path by calling the read_whole_file_str function */
fn read_file_whole(path: ~str) -> ~str {
let res = io::read_whole_file_str(&Path(path));
if result::is_err(&res) {
fail!(~"file_reader error: " + result::get_err(&res));
}
res.get()
}
/* read the file path line by line */
fn read_file_lines(path: ~str) -> ~str {
let res = io::file_reader(&Path(path));
if result::is_err(&res) |
let mut content = ~"";
let reader = res.get();
loop {
let line = reader.read_line();
if reader.eof() {
break;
}
// read_line does not return the '\n', so we add it
content = content + line + "\n";
}
content
}
fn main() {
let filename = ~"read_file.rs";
//let content = read_file_whole(copy filename);
let content = read_file_lines(copy filename);
io::println("the content of " + filename + " is [\n" + content + "]");
}
| {
fail!(~"file_reader error: " + result::get_err(&res));
} | conditional_block |
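The read_file.rs rows above use pre-1.0 Rust idioms (~str, fail!, io::read_whole_file_str) and will not compile on a modern toolchain; they are kept verbatim as dataset content. For reference, a rough Python mirror of the two reading strategies they demonstrate (helper names kept for comparison; this sketch is not part of the dataset):

from pathlib import Path

def read_file_whole(path):
    # Counterpart of read_file_whole: load the entire file in one call.
    return Path(path).read_text()

def read_file_lines(path):
    # Counterpart of read_file_lines: accumulate line by line,
    # re-appending the "\n" that per-line reading strips.
    content = ""
    with open(path) as reader:
        for line in reader:
            content += line.rstrip("\n") + "\n"
    return content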
TrayHandler.test.main.ts | /*
* Wire
* Copyright (C) 2019 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see http://www.gnu.org/licenses/.
*
*/
import * as assert from 'assert';
import {BrowserWindow, Tray} from 'electron';
import * as path from 'path';
import * as sinon from 'sinon';
import {TrayHandler} from './TrayHandler';
const fixturesDir = path.join(__dirname, '../../test/fixtures');
const TrayMock = new Tray(path.join(fixturesDir, 'tray.png'));
describe('TrayHandler', () => {
describe('initTray', () => {
it('creates native images for all tray icons and sets a default tray icon', () => {
const tray = new TrayHandler();
tray.initTray(TrayMock);
assert.strictEqual(Object.keys(tray['icons']!).length, 3);
assert.strictEqual(tray['icons']!.badge.constructor.name, 'NativeImage');
assert.strictEqual(tray['icons']!.tray.constructor.name, 'NativeImage');
assert.strictEqual(tray['icons']!.trayWithBadge.constructor.name, 'NativeImage');
sinon.assert.match(tray['trayIcon']!, sinon.match.defined);
});
});
describe('showUnreadCount', () => {
describe('without tray icon initialization', () => {
it('updates the badge counter and stops flashing the app frame when app is in focus while receiving new messages', async () => {
const tray = new TrayHandler();
tray.initTray(TrayMock);
const appWindow = new BrowserWindow();
const flashFrameSpy = sinon.spy(appWindow, 'flashFrame');
await appWindow.loadURL('about:blank');
assert.strictEqual(appWindow.isFocused(), true);
assert.ok(flashFrameSpy.notCalled);
tray.showUnreadCount(appWindow, 1);
| flashFrameSpy.restore();
});
});
describe('with tray icon initialization', () => {
it('updates the badge counter and stops flashing the app frame when app is in focus while receiving new messages', async () => {
const tray = new TrayHandler();
tray.initTray(TrayMock);
const appWindow = new BrowserWindow();
const flashFrameSpy = sinon.spy(appWindow, 'flashFrame');
await appWindow.loadFile(path.join(fixturesDir, 'badge.html'));
assert.strictEqual(appWindow.isFocused(), true);
assert.ok(flashFrameSpy.notCalled);
tray.showUnreadCount(appWindow, 10);
assert.ok(flashFrameSpy.firstCall.calledWith(false));
assert.strictEqual(tray['lastUnreadCount'], 10);
flashFrameSpy.restore();
});
it('flashes the app frame when app is not in focus and you receive new messages', async () => {
const tray = new TrayHandler();
tray.initTray(TrayMock);
const appWindow = new BrowserWindow({
show: false,
useContentSize: true,
});
const flashFrameSpy = sinon.spy(appWindow, 'flashFrame');
await appWindow.loadURL('about:blank');
assert.strictEqual(appWindow.isFocused(), false);
tray.showUnreadCount(appWindow, 2);
assert.ok(flashFrameSpy.firstCall.calledWith(true));
flashFrameSpy.restore();
});
it('does not change the flash state if the window has already been flashed', async () => {
const tray = new TrayHandler();
tray.initTray(TrayMock);
tray['lastUnreadCount'] = 5;
const appWindow = new BrowserWindow({
show: false,
useContentSize: true,
});
const flashFrameSpy = sinon.spy(appWindow, 'flashFrame');
await appWindow.loadURL('about:blank');
assert.strictEqual(appWindow.isFocused(), false);
tray.showUnreadCount(appWindow, 2);
assert.ok(flashFrameSpy.notCalled);
flashFrameSpy.restore();
});
});
});
}); | assert.ok(flashFrameSpy.firstCall.calledWith(false));
assert.strictEqual(tray['lastUnreadCount'], 1);
| random_line_split |
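The assertions in the row above imply a simple flashing rule: always stop flashing when the window is focused, and start flashing only on a fresh transition to unread while unfocused. A hedged Python sketch of that rule as inferred from the tests (the real TrayHandler may implement it differently):

class TrayBadgeSketch:
    def __init__(self):
        self.last_unread_count = 0

    def show_unread_count(self, window, count):
        if window.is_focused():
            # Focused window: stop any flashing, matching flashFrame(false).
            window.flash_frame(False)
        elif self.last_unread_count == 0 and count > 0:
            # Unfocused and newly unread: start flashing, matching flashFrame(true).
            window.flash_frame(True)
        # An already-flashing window is left alone (flashFrame not called).
        self.last_unread_count = count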
lev_distance.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
pub fn lev_distance(me: &str, t: &str) -> usize {
if me.is_empty() { return t.chars().count(); }
if t.is_empty() { return me.chars().count(); }
let mut dcol: Vec<_> = (0..t.len() + 1).collect();
let mut t_last = 0;
for (i, sc) in me.chars().enumerate() {
let mut current = i;
dcol[0] = current + 1;
for (j, tc) in t.chars().enumerate() {
let next = dcol[j + 1];
if sc == tc | else {
dcol[j + 1] = cmp::min(current, next);
dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1;
}
current = next;
t_last = j;
}
}
dcol[t_last + 1]
}
#[test]
fn test_lev_distance() {
use std::char::{ from_u32, MAX };
// Test bytelength agnosticity
for c in (0..MAX as u32)
.filter_map(|i| from_u32(i))
.map(|i| i.to_string()) {
assert_eq!(lev_distance(&c[..], &c[..]), 0);
}
let a = "\nMäry häd ä little lämb\n\nLittle lämb\n";
let b = "\nMary häd ä little lämb\n\nLittle lämb\n";
let c = "Mary häd ä little lämb\n\nLittle lämb\n";
assert_eq!(lev_distance(a, b), 1);
assert_eq!(lev_distance(b, a), 1);
assert_eq!(lev_distance(a, c), 2);
assert_eq!(lev_distance(c, a), 2);
assert_eq!(lev_distance(b, c), 1);
assert_eq!(lev_distance(c, b), 1);
}
| {
dcol[j + 1] = current;
} | conditional_block |
lev_distance.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
pub fn lev_distance(me: &str, t: &str) -> usize { |
let mut dcol: Vec<_> = (0..t.len() + 1).collect();
let mut t_last = 0;
for (i, sc) in me.chars().enumerate() {
let mut current = i;
dcol[0] = current + 1;
for (j, tc) in t.chars().enumerate() {
let next = dcol[j + 1];
if sc == tc {
dcol[j + 1] = current;
} else {
dcol[j + 1] = cmp::min(current, next);
dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1;
}
current = next;
t_last = j;
}
}
dcol[t_last + 1]
}
#[test]
fn test_lev_distance() {
use std::char::{ from_u32, MAX };
// Test bytelength agnosticity
for c in (0..MAX as u32)
.filter_map(|i| from_u32(i))
.map(|i| i.to_string()) {
assert_eq!(lev_distance(&c[..], &c[..]), 0);
}
let a = "\nMäry häd ä little lämb\n\nLittle lämb\n";
let b = "\nMary häd ä little lämb\n\nLittle lämb\n";
let c = "Mary häd ä little lämb\n\nLittle lämb\n";
assert_eq!(lev_distance(a, b), 1);
assert_eq!(lev_distance(b, a), 1);
assert_eq!(lev_distance(a, c), 2);
assert_eq!(lev_distance(c, a), 2);
assert_eq!(lev_distance(b, c), 1);
assert_eq!(lev_distance(c, b), 1);
} | if me.is_empty() { return t.chars().count(); }
if t.is_empty() { return me.chars().count(); } | random_line_split |
lev_distance.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
pub fn lev_distance(me: &str, t: &str) -> usize {
if me.is_empty() { return t.chars().count(); }
if t.is_empty() { return me.chars().count(); }
let mut dcol: Vec<_> = (0..t.len() + 1).collect();
let mut t_last = 0;
for (i, sc) in me.chars().enumerate() {
let mut current = i;
dcol[0] = current + 1;
for (j, tc) in t.chars().enumerate() {
let next = dcol[j + 1];
if sc == tc {
dcol[j + 1] = current;
} else {
dcol[j + 1] = cmp::min(current, next);
dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1;
}
current = next;
t_last = j;
}
}
dcol[t_last + 1]
}
#[test]
fn test_lev_distance() | {
use std::char::{ from_u32, MAX };
// Test bytelength agnosticity
for c in (0..MAX as u32)
.filter_map(|i| from_u32(i))
.map(|i| i.to_string()) {
assert_eq!(lev_distance(&c[..], &c[..]), 0);
}
let a = "\nMäry häd ä little lämb\n\nLittle lämb\n";
let b = "\nMary häd ä little lämb\n\nLittle lämb\n";
let c = "Mary häd ä little lämb\n\nLittle lämb\n";
assert_eq!(lev_distance(a, b), 1);
assert_eq!(lev_distance(b, a), 1);
assert_eq!(lev_distance(a, c), 2);
assert_eq!(lev_distance(c, a), 2);
assert_eq!(lev_distance(b, c), 1);
assert_eq!(lev_distance(c, b), 1);
}
| identifier_body |
|
lev_distance.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
pub fn lev_distance(me: &str, t: &str) -> usize {
if me.is_empty() { return t.chars().count(); }
if t.is_empty() { return me.chars().count(); }
let mut dcol: Vec<_> = (0..t.len() + 1).collect();
let mut t_last = 0;
for (i, sc) in me.chars().enumerate() {
let mut current = i;
dcol[0] = current + 1;
for (j, tc) in t.chars().enumerate() {
let next = dcol[j + 1];
if sc == tc {
dcol[j + 1] = current;
} else {
dcol[j + 1] = cmp::min(current, next);
dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1;
}
current = next;
t_last = j;
}
}
dcol[t_last + 1]
}
#[test]
fn | () {
use std::char::{ from_u32, MAX };
// Test bytelength agnosticity
for c in (0..MAX as u32)
.filter_map(|i| from_u32(i))
.map(|i| i.to_string()) {
assert_eq!(lev_distance(&c[..], &c[..]), 0);
}
let a = "\nMäry häd ä little lämb\n\nLittle lämb\n";
let b = "\nMary häd ä little lämb\n\nLittle lämb\n";
let c = "Mary häd ä little lämb\n\nLittle lämb\n";
assert_eq!(lev_distance(a, b), 1);
assert_eq!(lev_distance(b, a), 1);
assert_eq!(lev_distance(a, c), 2);
assert_eq!(lev_distance(c, a), 2);
assert_eq!(lev_distance(b, c), 1);
assert_eq!(lev_distance(c, b), 1);
}
| test_lev_distance | identifier_name |
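A direct Python transcription of the single-column Levenshtein recurrence used in the lev_distance rows above; after processing i+1 characters of me, dcol[j+1] holds the edit distance to the first j+1 characters of t:

def lev_distance(me, t):
    if not me:
        return len(t)
    if not t:
        return len(me)
    dcol = list(range(len(t) + 1))
    for i, sc in enumerate(me):
        current = i
        dcol[0] = current + 1
        for j, tc in enumerate(t):
            nxt = dcol[j + 1]
            if sc == tc:
                dcol[j + 1] = current
            else:
                # Substitution, insertion, or deletion: cheapest option + 1.
                dcol[j + 1] = min(current, nxt, dcol[j]) + 1
            current = nxt
    return dcol[len(t)]

For example, lev_distance("M\u00e4ry", "Mary") == 1, matching the byte-length-agnosticity test above.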
newjobpost.js | (function(){
'use strict';
/**
* @ngdoc function
* @name 343LandingPageApp.controller:NewjobpostCtrl
* @description
* # NewjobpostCtrl
* Controller of the 343LandingPageApp
*/
angular.module('343LandingPageApp')
.controller('NewjobpostCtrl', ['$scope', '$http', 'authFact', function ($scope, $http, authFact) {
var token = JSON.parse(authFact.getToken()).token;
// init the data model
$scope.job = {
title: '',
description:'',
requirements:'',
location:'',
compensation:'',
benefits:'',
howToApply:'',
token: token
};
| var req = {
method: 'POST',
url: 'http://neadcom.wwwss24.a2hosted.com/343TruckingAPI/api/v1/trucking/job',
headers: {'Content-Type': 'application/x-www-form-urlencoded'},
data: $scope.job
};
$scope.save = function(){
$http(req)
.then(function(response){
// success
window.alert(response.data.message);
// clear input fields
$scope.clearInputFields();
}, function(response){
// error
window.alert(response.status+' '+ response.statusText + ' error');
$scope.errorMessages = response.data;
});
};
$scope.clearInputFields = function(){
$scope.job = null;
};
}]);
}()); | random_line_split |
|
landing-util.js | (function($) {
/**
* Moves elements to/from the first positions of their respective parents.
* @param {jQuery} $elements Elements (or selector) to move.
* @param {bool} condition If true, moves elements to the top. Otherwise, moves elements back to their original locations.
*/
$.prioritize = function($elements, condition) {
var key = '__prioritize';
// Expand $elements if it's not already a jQuery object.
if (!($elements instanceof jQuery))
$elements = $($elements);
// Step through elements.
$elements.each(function() {
var $e = $(this), $p,
$parent = $e.parent();
| // Not moved? Move it.
if (!$e.data(key)) {
// Condition is false? Bail.
if (!condition)
return;
// Get placeholder (which will serve as our point of reference for when this element needs to move back).
$p = $e.prev();
// Couldn't find anything? Means this element's already at the top, so bail.
if ($p.length == 0)
return;
// Move element to top of parent.
$e.prependTo($parent);
// Mark element as moved.
$e.data(key, $p);
}
// Moved already?
else {
// Condition is true? Bail.
if (condition)
return;
$p = $e.data(key);
// Move element back to its original location (using our placeholder).
$e.insertAfter($p);
// Unmark element as moved.
$e.removeData(key);
}
});
};
})(jQuery); | // No parent? Bail.
if ($parent.length == 0)
return;
| random_line_split |
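The docblock above describes $.prioritize's move-to-top/restore bookkeeping. A hedged Python model of the same idea on a plain list (the class and names are illustrative; the real plugin stores the predecessor on the element via $.data):

class PrioritizeModel:
    def __init__(self, items):
        self.items = items
        self.memo = {}  # element -> the element it originally followed

    def prioritize(self, element, condition):
        i = self.items.index(element)
        if element not in self.memo:
            if not condition or i == 0:
                return  # condition false, or already at the top: bail
            self.memo[element] = self.items[i - 1]   # remember placeholder
            self.items.insert(0, self.items.pop(i))  # move to the top
        elif not condition:
            prev = self.memo.pop(element)
            self.items.pop(i)
            self.items.insert(self.items.index(prev) + 1, element)  # restore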
averesultgraph-consumer-bread.py | # Draw graphs: the x-axis is the number of nodes in the network.
import re
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
from mininet.log import setLogLevel, output, info
# Read the experimental result data files in a specified directory and generate a list of data file names.
def sortDataFile(dataFilePath, dataFileDir):
dataFileList = os.listdir("%s/data/%s" % (dataFilePath, dataFileDir))
# keep only statistic result files; filtering into a new list avoids the
# index-skipping bug of popping from the list while iterating over it
dataFileList = [f for f in dataFileList if "statResult" in f]
# Sort data files by file name (the number of nodes) using bubble sort
i = 0
while i < len(dataFileList)-1:
try:
j = 0
while j < len(dataFileList)-1-i:
fileName = dataFileList[j].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j = fileName[startChar : endChar]
# after j
fileName=dataFileList[j+1].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j1 = fileName[startChar : endChar]
if int(nodeNumber_j1.strip()) < int(nodeNumber_j.strip()):
tmp = dataFileList[j]
dataFileList[j] = dataFileList[j+1]
dataFileList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataFileList
# Read a data file and convert it to a two-dimensional list.
def readFileData(dataFilePath, dataFileDir, dataFileName):
data_file = open ("%s/data/%s/%s" % (dataFilePath, dataFileDir, dataFileName), "r")
lineField = []
dataLine = []
for line in data_file:
lineString = ""
j=0
# read a line of data and generate a list of fields
while j < len(line):
if not (line[j] == " "):
lineString = lineString + str(line[j].strip())
if j == len(line)-1:
lineField.append(lineString.strip())
else:
lineField.append(lineString.strip())
lineString = ""
j = j+1
dataLine.append(lineField)
lineField = []
return dataLine
# Sort a two-dimensional list
def listSort(dataList, sortCol):
"sortCol: the specified colume used for sorting."
i = 0
while i < len(dataList)-1:
try:
j = 0
while j < len(dataList)-1-i:
if float(dataList[j+1][sortCol]) < float(dataList[j][sortCol].strip()):
tmp = dataList[j]
dataList[j] = dataList[j+1]
dataList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataList
# Calculate the average statistic result of one experiment.
def aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol):
|
# randomly generate color
def get_cmap(n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
# Draw statistical graph
def drawStatGraph(dataFilePath, dataFileDir, aveCol):
# setting a style to use
plt.style.use('fivethirtyeight')
# create a figure
fig = plt.figure()
# define subplots and their positions in figure
plt1 = fig.add_subplot(221, axisbg='white')
plt2 = fig.add_subplot(222, axisbg='white')
plt3 = fig.add_subplot(223, axisbg='white')
plt4 = fig.add_subplot(224, axisbg='white')
# plotting the line 1 points
plt1.axis([5, 35, 4, 10])
plt2.axis([5, 35, 0, 15])
plt3.axis([5, 35, 0, 200])
plt4.axis([5, 35, 0, 2.5])
dataFileList = sortDataFile (dataFilePath, dataFileDir)
colors = ["b","g","r","y","black"]
nodes = [10, 15, 20, 25,30]
# Open all the experiment's data files and average the data according to the number of consumers.
# n counts the passes over these files (here n=5 because the consumer counts are {1,3,5,7,9}).
# Extract one column of data from all experimental results and draw one curve of the average value per consumer count.
n = 0
while n < 5:
col = n
# generate data for drawing the curves
conNum = []
aveCNOI = [] # number of interest packets sent by consumers
aveDLY = [] # delay
aveNOI = [] # the total number of interest packets transmitted on the network
aveIPLR = [] # Packet loss rate of interest packet
for dataFileName in dataFileList:
# calculate average value according to the number of consumers in statResult-xx.dat
# the returned results are saved in some List.
conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR = \
aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol)
startChar = dataFileName.index("-") + len("-")
endChar = dataFileName.index(".", startChar)
nodeNumber = dataFileName[startChar : endChar]
# extract the data of the specified column.
conNum = conNodes[col]
aveCNOI.append(aveConNumOutInt[col])
aveDLY.append(aveDelay[col])
aveNOI.append(aveNumOutInt[col])
aveIPLR.append(aveIntPLR[col])
# draw one curve for the specified number of consumers, for example 1, 3, 5, 7, or 9
labelChar = "CN=" + str(conNum)
colorN = int((n+1)*5)/5 - 1
color = colors[colorN]
plt1.plot(nodes, aveCNOI, color=color, linestyle='solid', label = labelChar,marker='s',markerfacecolor=color, markersize=10)
plt2.plot(nodes, aveDLY, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
plt3.plot(nodes, aveNOI, color=color, linestyle='solid',label=labelChar, marker='s', markerfacecolor=color, markersize=10)
plt4.plot(nodes, aveIPLR, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
n = n+1
plt1.set_title('The Number of Interest Packet')
plt2.set_title('Average Delay')
plt3.set_title('The Total Number of Interest Packet')
plt4.set_title('Packet Loss Rate of Interest Packet')
#plt1.xlabel('nodes')
#plt1.ylabel('ISR')
#plt1.title('Average Delay')
plt1.legend(loc='upper left')
plt2.legend(loc='upper right')
plt3.legend(loc='upper left')
plt4.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
setLogLevel('info')
dataFilePath = os.path.abspath(os.path.dirname(sys.argv[0]))
# drawStatGraph(dataFilePath, 'oppo', 0)
drawStatGraph(dataFilePath, 'bread', 0)
| "aveCol: the specified colume used for calculating a average value"
# Calculate the average value according to a specified column
# Read data file and generate a list
dataList = readFileData(dataFilePath, dataFileDir, dataFileName)
sortDataList = listSort(dataList, aveCol)
conNodes = []
aveConNumOutInt = []
aveDelay = []
aveNumOutInt = []
aveIntPLR = []
aveDataPLR = []
avePLR = []
i = 0
while i < len(sortDataList):
conNumOutInt = float(sortDataList[i][2].strip())
Delay = float(sortDataList[i][3].strip())
numOutInt = float(sortDataList[i][4].strip())
IntPLR = float(sortDataList[i][8].strip())
DataPLR = float(sortDataList[i][9].strip())
PLR = float(sortDataList[i][10].strip())
tmp = sortDataList[i][aveCol].strip()
j = i+1
n = 1
flag = True
while (j < len(sortDataList)) and flag:
if sortDataList[j][aveCol] == tmp:
n = n + 1
conNumOutInt = conNumOutInt + float(sortDataList[j][2].strip())
Delay = Delay + float(sortDataList[j][3].strip())
numOutInt = numOutInt + float(sortDataList[j][4].strip())
IntPLR = IntPLR + float(sortDataList[j][8].strip())
DataPLR = DataPLR + float(sortDataList[j][9].strip())
PLR = PLR + float(sortDataList[j][10].strip())
j = j+1
else:
flag = False
i = j
conNodes.append(int(tmp))
aveConNumOutInt.append(conNumOutInt/n)
aveDelay.append(Delay/n)
aveNumOutInt.append(numOutInt/n)
aveIntPLR.append(IntPLR/n)
aveDataPLR.append(DataPLR/n)
avePLR.append(PLR/n)
return conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR | identifier_body |
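The row above assumes whitespace-separated statResult-<nodes>.dat files whose column aveCol (here 0) holds the consumer count, with delay in column 3 and packet counters in columns 2, 4 and 8-10 (semantics inferred from the variable names). A tiny illustration of listSort on such rows, with hypothetical values:

rows = [['5', '0', '6.1', '9.8', '120', '0', '0', '0', '1.2', '0.8', '1.0'],
        ['1', '0', '5.2', '8.5', '100', '0', '0', '0', '0.9', '0.5', '0.7']]
sorted_rows = listSort(rows, 0)  # bubble sort on the consumer-count column
# -> the row with consumer count '1' now precedes the one with '5'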
averesultgraph-consumer-bread.py | # Draw graphs: the x-axis is the number of nodes in the network.
import re
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
from mininet.log import setLogLevel, output, info
# Read the experimental result data files in a specified directory and generate a list of data file names.
def | (dataFilePath, dataFileDir):
dataFileList = os.listdir("%s/data/%s" % (dataFilePath, dataFileDir))
# keep only statistic result files; filtering into a new list avoids the
# index-skipping bug of popping from the list while iterating over it
dataFileList = [f for f in dataFileList if "statResult" in f]
# Sort data files by file name (the number of nodes) using bubble sort
i = 0
while i < len(dataFileList)-1:
try:
j = 0
while j < len(dataFileList)-1-i:
fileName = dataFileList[j].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j = fileName[startChar : endChar]
# after j
fileName=dataFileList[j+1].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j1 = fileName[startChar : endChar]
if int(nodeNumber_j1.strip()) < int(nodeNumber_j.strip()):
tmp = dataFileList[j]
dataFileList[j] = dataFileList[j+1]
dataFileList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataFileList
# Read a data file and convert it to a two-dimensional list.
def readFileData(dataFilePath, dataFileDir, dataFileName):
data_file = open ("%s/data/%s/%s" % (dataFilePath, dataFileDir, dataFileName), "r")
lineField = []
dataLine = []
for line in data_file:
lineString = ""
j=0
# read a line of data and generate a list of fields
while j < len(line):
if not (line[j] == " "):
lineString = lineString + str(line[j].strip())
if j == len(line)-1:
lineField.append(lineString.strip())
else:
lineField.append(lineString.strip())
lineString = ""
j = j+1
dataLine.append(lineField)
lineField = []
return dataLine
# Sort a two-dimensional list
def listSort(dataList, sortCol):
"sortCol: the specified colume used for sorting."
i = 0
while i < len(dataList)-1:
try:
j = 0
while j < len(dataList)-1-i:
if float(dataList[j+1][sortCol]) < float(dataList[j][sortCol].strip()):
tmp = dataList[j]
dataList[j] = dataList[j+1]
dataList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataList
# Calculate the average statistic result of one experiment.
def aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol):
"aveCol: the specified colume used for calculating a average value"
# Calculate the average value according to a specified column
# Read data file and generate a list
dataList = readFileData(dataFilePath, dataFileDir, dataFileName)
sortDataList = listSort(dataList, aveCol)
conNodes = []
aveConNumOutInt = []
aveDelay = []
aveNumOutInt = []
aveIntPLR = []
aveDataPLR = []
avePLR = []
i = 0
while i < len(sortDataList):
conNumOutInt = float(sortDataList[i][2].strip())
Delay = float(sortDataList[i][3].strip())
numOutInt = float(sortDataList[i][4].strip())
IntPLR = float(sortDataList[i][8].strip())
DataPLR = float(sortDataList[i][9].strip())
PLR = float(sortDataList[i][10].strip())
tmp = sortDataList[i][aveCol].strip()
j = i+1
n = 1
flag = True
while (j < len(sortDataList)) and flag:
if sortDataList[j][aveCol] == tmp:
n = n + 1
conNumOutInt = conNumOutInt + float(sortDataList[j][2].strip())
Delay = Delay + float(sortDataList[j][3].strip())
numOutInt = numOutInt + float(sortDataList[j][4].strip())
IntPLR = IntPLR + float(sortDataList[j][8].strip())
DataPLR = DataPLR + float(sortDataList[j][9].strip())
PLR = PLR + float(sortDataList[j][10].strip())
j = j+1
else:
flag = False
i = j
conNodes.append(int(tmp))
aveConNumOutInt.append(conNumOutInt/n)
aveDelay.append(Delay/n)
aveNumOutInt.append(numOutInt/n)
aveIntPLR.append(IntPLR/n)
aveDataPLR.append(DataPLR/n)
avePLR.append(PLR/n)
return conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR
# randomly generate color
def get_cmap(n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
# Draw statistical graph
def drawStatGraph(dataFilePath, dataFileDir, aveCol):
# setting a style to use
plt.style.use('fivethirtyeight')
# create a figure
fig = plt.figure()
# define subplots and their positions in figure
plt1 = fig.add_subplot(221, axisbg='white')
plt2 = fig.add_subplot(222, axisbg='white')
plt3 = fig.add_subplot(223, axisbg='white')
plt4 = fig.add_subplot(224, axisbg='white')
# plotting the line 1 points
plt1.axis([5, 35, 4, 10])
plt2.axis([5, 35, 0, 15])
plt3.axis([5, 35, 0, 200])
plt4.axis([5, 35, 0, 2.5])
dataFileList = sortDataFile (dataFilePath, dataFileDir)
colors = ["b","g","r","y","black"]
nodes = [10, 15, 20, 25,30]
# Open all the experiment's data files and average the data according to the number of consumers.
# n counts the passes over these files (here n=5 because the consumer counts are {1,3,5,7,9}).
# Extract one column of data from all experimental results and draw one curve of the average value per consumer count.
n = 0
while n < 5:
col = n
# generate data for drawing the curves
conNum = []
aveCNOI = [] # number of interest packets sent by consumers
aveDLY = [] # delay
aveNOI = [] # the total number of interest packets transmitted on the network
aveIPLR = [] # Packet loss rate of interest packet
for dataFileName in dataFileList:
# calculate average value according to the number of consumers in statResult-xx.dat
# the returned results are saved in some List.
conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR = \
aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol)
startChar = dataFileName.index("-") + len("-")
endChar = dataFileName.index(".", startChar)
nodeNumber = dataFileName[startChar : endChar]
# extract the data of the specified column.
conNum = conNodes[col]
aveCNOI.append(aveConNumOutInt[col])
aveDLY.append(aveDelay[col])
aveNOI.append(aveNumOutInt[col])
aveIPLR.append(aveIntPLR[col])
# draw one curve for the specified number of consumers, for example 1, 3, 5, 7, or 9
labelChar = "CN=" + str(conNum)
colorN = int((n+1)*5)/5 - 1
color = colors[colorN]
plt1.plot(nodes, aveCNOI, color=color, linestyle='solid', label = labelChar,marker='s',markerfacecolor=color, markersize=10)
plt2.plot(nodes, aveDLY, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
plt3.plot(nodes, aveNOI, color=color, linestyle='solid',label=labelChar, marker='s', markerfacecolor=color, markersize=10)
plt4.plot(nodes, aveIPLR, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
n = n+1
plt1.set_title('The Number of Interest Packet')
plt2.set_title('Average Delay')
plt3.set_title('The Total Number of Interest Packet')
plt4.set_title('Packet Loss Rate of Interest Packet')
#plt1.xlabel('nodes')
#plt1.ylabel('ISR')
#plt1.title('Average Delay')
plt1.legend(loc='upper left')
plt2.legend(loc='upper right')
plt3.legend(loc='upper left')
plt4.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
setLogLevel('info')
dataFilePath = os.path.abspath(os.path.dirname(sys.argv[0]))
# drawStatGraph(dataFilePath, 'oppo', 0)
drawStatGraph(dataFilePath, 'bread', 0)
| sortDataFile | identifier_name |
averesultgraph-consumer-bread.py | # Draw graphs: the x-axis is the number of nodes in the network.
import re
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
from mininet.log import setLogLevel, output, info
# Read the experimental result data files in a specified directory and generate a list of data file names.
def sortDataFile(dataFilePath, dataFileDir):
dataFileList = os.listdir("%s/data/%s" % (dataFilePath, dataFileDir))
# keep only statistic result files; filtering into a new list avoids the
# index-skipping bug of popping from the list while iterating over it
dataFileList = [f for f in dataFileList if "statResult" in f]
# Sort data files by file name (the number of nodes) using bubble sort
i = 0
while i < len(dataFileList)-1:
try:
j = 0
while j < len(dataFileList)-1-i:
fileName = dataFileList[j].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j = fileName[startChar : endChar]
# after j
fileName=dataFileList[j+1].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j1 = fileName[startChar : endChar]
if int(nodeNumber_j1.strip()) < int(nodeNumber_j.strip()):
tmp = dataFileList[j]
dataFileList[j] = dataFileList[j+1]
dataFileList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataFileList
# Read a data file and convert it to a two-dimensional list.
def readFileData(dataFilePath, dataFileDir, dataFileName):
data_file = open ("%s/data/%s/%s" % (dataFilePath, dataFileDir, dataFileName), "r")
lineField = []
dataLine = []
for line in data_file:
lineString = ""
j=0
# read a line of data and generate a list of fields
while j < len(line):
if not (line[j] == " "):
lineString = lineString + str(line[j].strip())
if j == len(line)-1:
lineField.append(lineString.strip())
else:
lineField.append(lineString.strip())
lineString = ""
j = j+1
dataLine.append(lineField)
lineField = []
return dataLine
# Sort a two-dimensional list
def listSort(dataList, sortCol):
"sortCol: the specified colume used for sorting."
i = 0
while i < len(dataList)-1:
try:
j = 0
while j < len(dataList)-1-i:
if float(dataList[j+1][sortCol]) < float(dataList[j][sortCol].strip()):
tmp = dataList[j]
dataList[j] = dataList[j+1]
dataList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataList
# Calculate the average statistic result of one experiment.
def aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol):
"aveCol: the specified colume used for calculating a average value"
# Calculate the average value according to a specified column
# Read data file and generate a list
dataList = readFileData(dataFilePath, dataFileDir, dataFileName)
sortDataList = listSort(dataList, aveCol)
conNodes = []
aveConNumOutInt = []
aveDelay = []
aveNumOutInt = []
aveIntPLR = []
aveDataPLR = []
avePLR = []
i = 0
while i < len(sortDataList):
conNumOutInt = float(sortDataList[i][2].strip())
Delay = float(sortDataList[i][3].strip())
numOutInt = float(sortDataList[i][4].strip())
IntPLR = float(sortDataList[i][8].strip())
DataPLR = float(sortDataList[i][9].strip())
PLR = float(sortDataList[i][10].strip())
tmp = sortDataList[i][aveCol].strip()
j = i+1
n = 1
flag = True
while (j < len(sortDataList)) and flag:
if sortDataList[j][aveCol] == tmp:
n = n + 1
conNumOutInt = conNumOutInt + float(sortDataList[j][2].strip())
Delay = Delay + float(sortDataList[j][3].strip()) | j = j+1
else:
flag = False
i = j
conNodes.append(int(tmp))
aveConNumOutInt.append(conNumOutInt/n)
aveDelay.append(Delay/n)
aveNumOutInt.append(numOutInt/n)
aveIntPLR.append(IntPLR/n)
aveDataPLR.append(DataPLR/n)
avePLR.append(PLR/n)
return conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR
# randomly generate color
def get_cmap(n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
# Draw statistical graph
def drawStatGraph(dataFilePath, dataFileDir, aveCol):
# setting a style to use
plt.style.use('fivethirtyeight')
# create a figure
fig = plt.figure()
# define subplots and their positions in figure
plt1 = fig.add_subplot(221, axisbg='white')
plt2 = fig.add_subplot(222, axisbg='white')
plt3 = fig.add_subplot(223, axisbg='white')
plt4 = fig.add_subplot(224, axisbg='white')
# plotting the line 1 points
plt1.axis([5, 35, 4, 10])
plt2.axis([5, 35, 0, 15])
plt3.axis([5, 35, 0, 200])
plt4.axis([5, 35, 0, 2.5])
dataFileList = sortDataFile (dataFilePath, dataFileDir)
colors = ["b","g","r","y","black"]
nodes = [10, 15, 20, 25,30]
# Open all the experiment's data files and average the data according to the number of consumers.
# n counts the passes over these files (here n=5 because the consumer counts are {1,3,5,7,9}).
# Extract one column of data from all experimental results and draw one curve of the average value per consumer count.
n = 0
while n < 5:
col = n
# generate data for drawing the curves
conNum = []
aveCNOI = [] # number of interest packets sent by consumers
aveDLY = [] # delay
aveNOI = [] # the total number of interest packets transmitted on the network
aveIPLR = [] # Packet loss rate of interest packet
for dataFileName in dataFileList:
# calculate average value according to the number of consumers in statResult-xx.dat
# the returned results are saved in some List.
conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR = \
aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol)
startChar = dataFileName.index("-") + len("-")
endChar = dataFileName.index(".", startChar)
nodeNumber = dataFileName[startChar : endChar]
# extract the data of the specified column.
conNum = conNodes[col]
aveCNOI.append(aveConNumOutInt[col])
aveDLY.append(aveDelay[col])
aveNOI.append(aveNumOutInt[col])
aveIPLR.append(aveIntPLR[col])
# draw one curve for the specified number of consumers, for example 1, 3, 5, 7, or 9
labelChar = "CN=" + str(conNum)
colorN = int((n+1)*5)/5 - 1
color = colors[colorN]
plt1.plot(nodes, aveCNOI, color=color, linestyle='solid', label = labelChar,marker='s',markerfacecolor=color, markersize=10)
plt2.plot(nodes, aveDLY, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
plt3.plot(nodes, aveNOI, color=color, linestyle='solid',label=labelChar, marker='s', markerfacecolor=color, markersize=10)
plt4.plot(nodes, aveIPLR, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
n = n+1
plt1.set_title('The Number of Interest Packet')
plt2.set_title('Average Delay')
plt3.set_title('The Total Number of Interest Packet')
plt4.set_title('Packet Loss Rate of Interest Packet')
#plt1.xlabel('nodes')
#plt1.ylabel('ISR')
#plt1.title('Average Delay')
plt1.legend(loc='upper left')
plt2.legend(loc='upper right')
plt3.legend(loc='upper left')
plt4.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
setLogLevel('info')
dataFilePath = os.path.abspath(os.path.dirname(sys.argv[0]))
# drawStatGraph(dataFilePath, 'oppo', 0)
drawStatGraph(dataFilePath, 'bread', 0) | numOutInt = numOutInt + float(sortDataList[j][4].strip())
IntPLR = IntPLR + float(sortDataList[j][8].strip())
DataPLR = DataPLR + float(sortDataList[j][9].strip())
PLR = PLR + float(sortDataList[j][10].strip()) | random_line_split |
averesultgraph-consumer-bread.py | # Draw graphs: the x-axis is the number of nodes in the network.
import re
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
from mininet.log import setLogLevel, output, info
# Read the experimental result data files in a specified directory and generate a list of data file names.
def sortDataFile(dataFilePath, dataFileDir):
dataFileList = os.listdir("%s/data/%s" % (dataFilePath, dataFileDir))
# keep only statistic result files; filtering into a new list avoids the
# index-skipping bug of popping from the list while iterating over it
dataFileList = [f for f in dataFileList if "statResult" in f]
# Sort data files by file name (the number of nodes) using bubble sort
i = 0
while i < len(dataFileList)-1:
try:
j = 0
while j < len(dataFileList)-1-i:
fileName = dataFileList[j].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j = fileName[startChar : endChar]
# after j
fileName=dataFileList[j+1].strip()
startChar = fileName.index("-") + len("-")
endChar = fileName.index(".", startChar)
nodeNumber_j1 = fileName[startChar : endChar]
if int(nodeNumber_j1.strip()) < int(nodeNumber_j.strip()):
tmp = dataFileList[j]
dataFileList[j] = dataFileList[j+1]
dataFileList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataFileList
# Read a data file and convert it to a two-dimensional list.
def readFileData(dataFilePath, dataFileDir, dataFileName):
data_file = open ("%s/data/%s/%s" % (dataFilePath, dataFileDir, dataFileName), "r")
lineField = []
dataLine = []
for line in data_file:
lineString = ""
j=0
# read a line of data and generate a list of fields
while j < len(line):
if not (line[j] == " "):
lineString = lineString + str(line[j].strip())
if j == len(line)-1:
lineField.append(lineString.strip())
else:
lineField.append(lineString.strip())
lineString = ""
j = j+1
dataLine.append(lineField)
lineField = []
return dataLine
# Sort a two-dimensional list
def listSort(dataList, sortCol):
"sortCol: the specified colume used for sorting."
i = 0
while i < len(dataList)-1:
try:
j = 0
while j < len(dataList)-1-i:
if float(dataList[j+1][sortCol]) < float(dataList[j][sortCol].strip()):
tmp = dataList[j]
dataList[j] = dataList[j+1]
dataList[j+1] = tmp
j = j+1
except:
pass
i = i+1
return dataList
# Calculate the average statistic result of one experiment.
def aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol):
"aveCol: the specified colume used for calculating a average value"
# Calculate the average value according to a specified column
# Read data file and generate a list
dataList = readFileData(dataFilePath, dataFileDir, dataFileName)
sortDataList = listSort(dataList, aveCol)
conNodes = []
aveConNumOutInt = []
aveDelay = []
aveNumOutInt = []
aveIntPLR = []
aveDataPLR = []
avePLR = []
i = 0
while i < len(sortDataList):
conNumOutInt = float(sortDataList[i][2].strip())
Delay = float(sortDataList[i][3].strip())
numOutInt = float(sortDataList[i][4].strip())
IntPLR = float(sortDataList[i][8].strip())
DataPLR = float(sortDataList[i][9].strip())
PLR = float(sortDataList[i][10].strip())
tmp = sortDataList[i][aveCol].strip()
j = i+1
n = 1
flag = True
while (j < len(sortDataList)) and flag:
|
i = j
conNodes.append(int(tmp))
aveConNumOutInt.append(conNumOutInt/n)
aveDelay.append(Delay/n)
aveNumOutInt.append(numOutInt/n)
aveIntPLR.append(IntPLR/n)
aveDataPLR.append(DataPLR/n)
avePLR.append(PLR/n)
return conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR
# randomly generate color
def get_cmap(n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
# Draw statistical graph
def drawStatGraph(dataFilePath, dataFileDir, aveCol):
# setting a style to use
plt.style.use('fivethirtyeight')
# create a figure
fig = plt.figure()
# define subplots and their positions in figure
plt1 = fig.add_subplot(221, axisbg='white')
plt2 = fig.add_subplot(222, axisbg='white')
plt3 = fig.add_subplot(223, axisbg='white')
plt4 = fig.add_subplot(224, axisbg='white')
# plotting the line 1 points
plt1.axis([5, 35, 4, 10])
plt2.axis([5, 35, 0, 15])
plt3.axis([5, 35, 0, 200])
plt4.axis([5, 35, 0, 2.5])
dataFileList = sortDataFile (dataFilePath, dataFileDir)
colors = ["b","g","r","y","black"]
nodes = [10, 15, 20, 25,30]
# Open all the experiment's data files and average the data according to the number of consumers.
# n counts the passes over these files (here n=5 because the consumer counts are {1,3,5,7,9}).
# Extract one column of data from all experimental results and draw one curve of the average value per consumer count.
n = 0
while n < 5:
col = n
# generate data for drawing the curves
conNum = []
aveCNOI = [] # number of interest packets sent by consumers
aveDLY = [] # delay
aveNOI = [] # the total number of interest packets transmitted on the network
aveIPLR = [] # Packet loss rate of interest packet
for dataFileName in dataFileList:
# calculate average value according to the number of consumers in statResult-xx.dat
# the returned results are saved in some List.
conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR = \
aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol)
startChar = dataFileName.index("-") + len("-")
endChar = dataFileName.index(".", startChar)
nodeNumber = dataFileName[startChar : endChar]
# extract the data of the specified column.
conNum = conNodes[col]
aveCNOI.append(aveConNumOutInt[col])
aveDLY.append(aveDelay[col])
aveNOI.append(aveNumOutInt[col])
aveIPLR.append(aveIntPLR[col])
# draw one curve for the specified number of consumers, for example 1, 3, 5, 7, or 9
labelChar = "CN=" + str(conNum)
colorN = int((n+1)*5)/5 - 1
color = colors[colorN]
plt1.plot(nodes, aveCNOI, color=color, linestyle='solid', label = labelChar,marker='s',markerfacecolor=color, markersize=10)
plt2.plot(nodes, aveDLY, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
plt3.plot(nodes, aveNOI, color=color, linestyle='solid',label=labelChar, marker='s', markerfacecolor=color, markersize=10)
plt4.plot(nodes, aveIPLR, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
n = n+1
plt1.set_title('The Number of Interest Packet')
plt2.set_title('Average Delay')
plt3.set_title('The Total Number of Interest Packet')
plt4.set_title('Packet Loss Rate of Interest Packet')
#plt1.xlabel('nodes')
#plt1.ylabel('ISR')
#plt1.title('Average Delay')
plt1.legend(loc='upper left')
plt2.legend(loc='upper right')
plt3.legend(loc='upper left')
plt4.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
setLogLevel('info')
dataFilePath = os.path.abspath(os.path.dirname(sys.argv[0]))
# drawStatGraph(dataFilePath, 'oppo', 0)
drawStatGraph(dataFilePath, 'bread', 0)
| if sortDataList[j][aveCol] == tmp:
n = n + 1
conNumOutInt = conNumOutInt + float(sortDataList[j][2].strip())
Delay = Delay + float(sortDataList[j][3].strip())
numOutInt = numOutInt + float(sortDataList[j][4].strip())
IntPLR = IntPLR + float(sortDataList[j][8].strip())
DataPLR = DataPLR + float(sortDataList[j][9].strip())
PLR = PLR + float(sortDataList[j][10].strip())
j = j+1
else:
flag = False | conditional_block |
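The aveStatResult loop in the rows above is a hand-rolled group-by-and-mean over rows pre-sorted on aveCol. A more compact sketch of the same idea for a single metric (illustrative only; assumes the same whitespace-split column layout):

from collections import defaultdict

def average_delay_by_consumer_count(rows, key_col=0, delay_col=3):
    groups = defaultdict(list)
    for row in rows:
        groups[int(row[key_col])].append(float(row[delay_col]))
    # One averaged entry per consumer count, in ascending key order.
    return {k: sum(v) / len(v) for k, v in sorted(groups.items())}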
socket.service.ts | ///<reference path="../../../node_modules/@angular/core/src/facade/async.d.ts"/>
import {EventEmitter, Inject, Injectable, Output} from '@angular/core';
import { Observable } from 'rxjs/Rx';
import { APP_CONFIG, AppConfig } from '../app.config';
import { AppService } from './app.service';
import * as io from 'socket.io-client';
@Injectable()
export class SocketService {
private name: string;
socket: SocketIOClient.Socket;
constructor(@Inject(APP_CONFIG) private config: AppConfig, private appService: AppService) {
const socketUrl = 'https://yalabenanotlob.herokuapp.com';
this.socket = io.connect(socketUrl);
}
// Get items observable
get(): Observable<any> {
this.socket.on('connect', () => this.connect());
this.socket.on('disconnect', () => this.disconnect());
this.socket.on('error', (error: string) => {
console.log(`ERROR: "${error}"`);
});
// Return observable which follows "notification" and "order checkout" signals from socket stream
return new Observable((observer: any) => {
this.socket.on('refresh notifications', (notification: any) => observer.next({type: 'notification', data: notification}) );
this.socket.on('order checkout', (checkout: any) => observer.next({type: 'checkout', data: checkout}) );
// return () => this.socket.close();
});
}
see(id: any) {
console.log(id);
this.socket.emit('see notification', id);
}
checkout(order: any) {
this.socket.emit('checkout', order);
}
notify(notification: string) {
this.socket.emit('new notification', notification);
}
// Handle connection opening
private connect() {
console.log(`Connected to https://yalabenanotlob.herokuapp.com`);
// Request initial list when connected
this.socket.emit('login', this.appService.user);
}
// Handle connection closing | console.log(`Disconnected from https://yalabenanotlob.herokuapp.com`);
this.socket.emit('logout', this.appService.user);
}
} | private disconnect() { | random_line_split |
socket.service.ts | ///<reference path="../../../node_modules/@angular/core/src/facade/async.d.ts"/>
import {EventEmitter, Inject, Injectable, Output} from '@angular/core';
import { Observable } from 'rxjs/Rx';
import { APP_CONFIG, AppConfig } from '../app.config';
import { AppService } from './app.service';
import * as io from 'socket.io-client';
@Injectable()
export class SocketService {
private name: string;
socket: SocketIOClient.Socket;
constructor(@Inject(APP_CONFIG) private config: AppConfig, private appService: AppService) {
const socketUrl = 'https://yalabenanotlob.herokuapp.com';
this.socket = io.connect(socketUrl);
}
// Get items observable
get(): Observable<any> |
see(id: any) {
console.log(id);
this.socket.emit('see notification', id);
}
checkout(order: any) {
this.socket.emit('checkout', order);
}
notify(notification: string) {
this.socket.emit('new notification', notification);
}
// Handle connection opening
private connect() {
console.log(`Connected to https://yalabenanotlob.herokuapp.com`);
// Request initial list when connected
this.socket.emit('login', this.appService.user);
}
// Handle connection closing
private disconnect() {
console.log(`Disconnected from https://yalabenanotlob.herokuapp.com`);
this.socket.emit('logout', this.appService.user);
}
}
| {
this.socket.on('connect', () => this.connect());
this.socket.on('disconnect', () => this.disconnect());
this.socket.on('error', (error: string) => {
console.log(`ERROR: "${error}"`);
});
// Return observable which follows "notification" and "order checkout" signals from socket stream
return new Observable((observer: any) => {
this.socket.on('refresh notifications', (notification: any) => observer.next({type: 'notification', data: notification}) );
this.socket.on('order checkout', (checkout: any) => observer.next({type: 'checkout', data: checkout}) );
// return () => this.socket.close();
});
} | identifier_body |
socket.service.ts | ///<reference path="../../../node_modules/@angular/core/src/facade/async.d.ts"/>
import {EventEmitter, Inject, Injectable, Output} from '@angular/core';
import { Observable } from 'rxjs/Rx';
import { APP_CONFIG, AppConfig } from '../app.config';
import { AppService } from './app.service';
import * as io from 'socket.io-client';
@Injectable()
export class SocketService {
private name: string;
socket: SocketIOClient.Socket;
constructor(@Inject(APP_CONFIG) private config: AppConfig, private appService: AppService) {
const socketUrl = 'https://yalabenanotlob.herokuapp.com';
this.socket = io.connect(socketUrl);
}
// Get items observable
get(): Observable<any> {
this.socket.on('connect', () => this.connect());
this.socket.on('disconnect', () => this.disconnect());
this.socket.on('error', (error: string) => {
console.log(`ERROR: "${error}"`);
});
// Return observable which follows "notification" and "order checkout" signals from socket stream
return new Observable((observer: any) => {
this.socket.on('refresh notifications', (notification: any) => observer.next({type: 'notification', data: notification}) );
this.socket.on('order checkout', (checkout: any) => observer.next({type: 'checkout', data: checkout}) );
// return () => this.socket.close();
});
}
see(id: any) {
console.log(id);
this.socket.emit('see notification', id);
}
checkout(order: any) {
this.socket.emit('checkout', order);
}
notify(notification: string) {
this.socket.emit('new notification', notification);
}
// Handle connection opening
private connect() {
console.log(`Connected to https://yalabenanotlob.herokuapp.com`);
// Request initial list when connected
this.socket.emit('login', this.appService.user);
}
// Handle connection closing
private | () {
console.log(`Disconnected from https://yalabenanotlob.herokuapp.com`);
this.socket.emit('logout', this.appService.user);
}
}
| disconnect | identifier_name |
materialise.py | from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from itertools import islice
from petl.compat import izip_longest, text_type, next
from petl.util.base import asindices, Table
def listoflists(tbl):
return [list(row) for row in tbl]
Table.listoflists = listoflists
Table.lol = listoflists
def tupleoftuples(tbl):
return tuple(tuple(row) for row in tbl)
Table.tupleoftuples = tupleoftuples
Table.tot = tupleoftuples |
Table.listoftuples = listoftuples
Table.lot = listoftuples
def tupleoflists(tbl):
return tuple(list(row) for row in tbl)
Table.tupleoflists = tupleoflists
Table.tol = tupleoflists
def columns(table, missing=None):
"""
Construct a :class:`dict` mapping field names to lists of values. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
>>> cols = etl.columns(table)
>>> cols['foo']
['a', 'b', 'b']
>>> cols['bar']
[1, 2, 3]
See also :func:`petl.util.materialise.facetcolumns`.
"""
cols = OrderedDict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
for f in flds:
cols[f] = list()
for row in it:
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return cols
Table.columns = columns
def facetcolumns(table, key, missing=None):
"""
Like :func:`petl.util.materialise.columns` but stratified by values of the
given key field. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, True],
... ['b', 3]]
>>> fc = etl.facetcolumns(table, 'foo')
>>> fc['a']
{'foo': ['a'], 'bar': [1], 'baz': [True]}
>>> fc['b']
{'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
"""
fct = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
indices = asindices(hdr, key)
assert len(indices) > 0, 'no key field selected'
getkey = operator.itemgetter(*indices)
for row in it:
kv = getkey(row)
if kv not in fct:
cols = dict()
for f in flds:
cols[f] = list()
fct[kv] = cols
else:
cols = fct[kv]
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return fct
Table.facetcolumns = facetcolumns
def cache(table, n=None):
"""
Wrap the table with a cache that caches up to `n` rows as they are initially
requested via iteration (cache all rows by default).
"""
return CacheView(table, n=n)
Table.cache = cache
class CacheView(Table):
def __init__(self, inner, n=None):
self.inner = inner
self.n = n
self.cache = list()
self.cachecomplete = False
def clearcache(self):
self.cache = list()
self.cachecomplete = False
def __iter__(self):
# serve whatever is in the cache first
for row in self.cache:
yield row
if not self.cachecomplete:
# serve the remainder from the inner iterator
it = iter(self.inner)
for row in islice(it, len(self.cache), None):
# maybe there's more room in the cache?
if not self.n or len(self.cache) < self.n:
self.cache.append(row)
yield row
# does the cache contain a complete copy of the inner table?
if not self.n or len(self.cache) < self.n:
self.cachecomplete = True |
def listoftuples(tbl):
return [tuple(row) for row in tbl]
| random_line_split |
materialise.py | from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from itertools import islice
from petl.compat import izip_longest, text_type, next
from petl.util.base import asindices, Table
def | (tbl):
return [list(row) for row in tbl]
Table.listoflists = listoflists
Table.lol = listoflists
def tupleoftuples(tbl):
return tuple(tuple(row) for row in tbl)
Table.tupleoftuples = tupleoftuples
Table.tot = tupleoftuples
def listoftuples(tbl):
return [tuple(row) for row in tbl]
Table.listoftuples = listoftuples
Table.lot = listoftuples
def tupleoflists(tbl):
return tuple(list(row) for row in tbl)
Table.tupleoflists = tupleoflists
Table.tol = tupleoflists
def columns(table, missing=None):
"""
Construct a :class:`dict` mapping field names to lists of values. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
>>> cols = etl.columns(table)
>>> cols['foo']
['a', 'b', 'b']
>>> cols['bar']
[1, 2, 3]
See also :func:`petl.util.materialise.facetcolumns`.
"""
cols = OrderedDict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
for f in flds:
cols[f] = list()
for row in it:
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return cols
Table.columns = columns
def facetcolumns(table, key, missing=None):
"""
Like :func:`petl.util.materialise.columns` but stratified by values of the
given key field. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, True],
... ['b', 3]]
>>> fc = etl.facetcolumns(table, 'foo')
>>> fc['a']
{'foo': ['a'], 'bar': [1], 'baz': [True]}
>>> fc['b']
{'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
"""
fct = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
indices = asindices(hdr, key)
assert len(indices) > 0, 'no key field selected'
getkey = operator.itemgetter(*indices)
for row in it:
kv = getkey(row)
if kv not in fct:
cols = dict()
for f in flds:
cols[f] = list()
fct[kv] = cols
else:
cols = fct[kv]
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return fct
Table.facetcolumns = facetcolumns
def cache(table, n=None):
"""
Wrap the table with a cache that caches up to `n` rows as they are initially
    requested via iteration (cache all rows by default).
"""
return CacheView(table, n=n)
Table.cache = cache
class CacheView(Table):
def __init__(self, inner, n=None):
self.inner = inner
self.n = n
self.cache = list()
self.cachecomplete = False
def clearcache(self):
self.cache = list()
self.cachecomplete = False
def __iter__(self):
# serve whatever is in the cache first
for row in self.cache:
yield row
if not self.cachecomplete:
# serve the remainder from the inner iterator
it = iter(self.inner)
for row in islice(it, len(self.cache), None):
# maybe there's more room in the cache?
if not self.n or len(self.cache) < self.n:
self.cache.append(row)
yield row
# does the cache contain a complete copy of the inner table?
if not self.n or len(self.cache) < self.n:
self.cachecomplete = True
| listoflists | identifier_name |
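A usage sketch for the cache() wrapper defined above, assuming petl's wrap() helper is available as in the released library; the table literal is a toy example:

import petl as etl

table = [['foo', 'bar'], ['a', 1], ['b', 2]]
view = etl.wrap(table).cache(n=1)  # keep at most one row in the cache

first = list(view)   # fills the cache while pulling from the inner table
second = list(view)  # replays the cached row, then resumes from the source
assert first == second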
materialise.py | from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from itertools import islice
from petl.compat import izip_longest, text_type, next
from petl.util.base import asindices, Table
def listoflists(tbl):
return [list(row) for row in tbl]
Table.listoflists = listoflists
Table.lol = listoflists
def tupleoftuples(tbl):
return tuple(tuple(row) for row in tbl)
Table.tupleoftuples = tupleoftuples
Table.tot = tupleoftuples
def listoftuples(tbl):
return [tuple(row) for row in tbl]
Table.listoftuples = listoftuples
Table.lot = listoftuples
def tupleoflists(tbl):
return tuple(list(row) for row in tbl)
Table.tupleoflists = tupleoflists
Table.tol = tupleoflists
def columns(table, missing=None):
"""
Construct a :class:`dict` mapping field names to lists of values. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
>>> cols = etl.columns(table)
>>> cols['foo']
['a', 'b', 'b']
>>> cols['bar']
[1, 2, 3]
See also :func:`petl.util.materialise.facetcolumns`.
"""
cols = OrderedDict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
for f in flds:
cols[f] = list()
for row in it:
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return cols
Table.columns = columns
def facetcolumns(table, key, missing=None):
"""
Like :func:`petl.util.materialise.columns` but stratified by values of the
given key field. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, True],
... ['b', 3]]
>>> fc = etl.facetcolumns(table, 'foo')
>>> fc['a']
{'foo': ['a'], 'bar': [1], 'baz': [True]}
>>> fc['b']
{'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
"""
fct = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
indices = asindices(hdr, key)
assert len(indices) > 0, 'no key field selected'
getkey = operator.itemgetter(*indices)
for row in it:
kv = getkey(row)
if kv not in fct:
cols = dict()
for f in flds:
cols[f] = list()
fct[kv] = cols
else:
cols = fct[kv]
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return fct
Table.facetcolumns = facetcolumns
def cache(table, n=None):
"""
Wrap the table with a cache that caches up to `n` rows as they are initially
    requested via iteration (cache all rows by default).
"""
return CacheView(table, n=n)
Table.cache = cache
class CacheView(Table):
def __init__(self, inner, n=None):
self.inner = inner
self.n = n
self.cache = list()
self.cachecomplete = False
def clearcache(self):
self.cache = list()
self.cachecomplete = False
def __iter__(self):
# serve whatever is in the cache first
for row in self.cache:
yield row
if not self.cachecomplete:
# serve the remainder from the inner iterator
it = iter(self.inner)
for row in islice(it, len(self.cache), None):
# maybe there's more room in the cache?
if not self.n or len(self.cache) < self.n:
self.cache.append(row)
yield row
# does the cache contain a complete copy of the inner table?
if not self.n or len(self.cache) < self.n:
| self.cachecomplete = True | conditional_block |
|
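The four materialisation shortcuts at the top of the module differ only in the container types they produce; a quick sketch, again assuming petl's wrap():

import petl as etl

tbl = etl.wrap([['foo', 'bar'], ['a', 1]])
assert tbl.lol() == [['foo', 'bar'], ['a', 1]]   # listoflists
assert tbl.lot() == [('foo', 'bar'), ('a', 1)]   # listoftuples
assert tbl.tot() == (('foo', 'bar'), ('a', 1))   # tupleoftuples
assert tbl.tol() == (['foo', 'bar'], ['a', 1])   # tupleoflists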
materialise.py | from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from itertools import islice
from petl.compat import izip_longest, text_type, next
from petl.util.base import asindices, Table
def listoflists(tbl):
return [list(row) for row in tbl]
Table.listoflists = listoflists
Table.lol = listoflists
def tupleoftuples(tbl):
return tuple(tuple(row) for row in tbl)
Table.tupleoftuples = tupleoftuples
Table.tot = tupleoftuples
def listoftuples(tbl):
return [tuple(row) for row in tbl]
Table.listoftuples = listoftuples
Table.lot = listoftuples
def tupleoflists(tbl):
return tuple(list(row) for row in tbl)
Table.tupleoflists = tupleoflists
Table.tol = tupleoflists
def columns(table, missing=None):
"""
Construct a :class:`dict` mapping field names to lists of values. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
>>> cols = etl.columns(table)
>>> cols['foo']
['a', 'b', 'b']
>>> cols['bar']
[1, 2, 3]
See also :func:`petl.util.materialise.facetcolumns`.
"""
cols = OrderedDict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
for f in flds:
cols[f] = list()
for row in it:
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return cols
Table.columns = columns
def facetcolumns(table, key, missing=None):
"""
Like :func:`petl.util.materialise.columns` but stratified by values of the
given key field. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, True],
... ['b', 3]]
>>> fc = etl.facetcolumns(table, 'foo')
>>> fc['a']
{'foo': ['a'], 'bar': [1], 'baz': [True]}
>>> fc['b']
{'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
"""
fct = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
indices = asindices(hdr, key)
assert len(indices) > 0, 'no key field selected'
getkey = operator.itemgetter(*indices)
for row in it:
kv = getkey(row)
if kv not in fct:
cols = dict()
for f in flds:
cols[f] = list()
fct[kv] = cols
else:
cols = fct[kv]
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return fct
Table.facetcolumns = facetcolumns
def cache(table, n=None):
"""
Wrap the table with a cache that caches up to `n` rows as they are initially
    requested via iteration (cache all rows by default).
"""
return CacheView(table, n=n)
Table.cache = cache
class CacheView(Table):
def __init__(self, inner, n=None):
self.inner = inner
self.n = n
self.cache = list()
self.cachecomplete = False
def clearcache(self):
self.cache = list()
self.cachecomplete = False
def __iter__(self):
# serve whatever is in the cache first
| for row in self.cache:
yield row
if not self.cachecomplete:
# serve the remainder from the inner iterator
it = iter(self.inner)
for row in islice(it, len(self.cache), None):
# maybe there's more room in the cache?
if not self.n or len(self.cache) < self.n:
self.cache.append(row)
yield row
# does the cache contain a complete copy of the inner table?
if not self.n or len(self.cache) < self.n:
self.cachecomplete = True | identifier_body |
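Both columns() and facetcolumns() pad short rows via izip_longest(..., fillvalue=missing), which is why the facetcolumns docstring shows None for the absent 'baz' value. The padding in isolation (Python 3 spelling; petl.compat exposes the same thing as izip_longest):

from itertools import zip_longest

flds = ['foo', 'bar', 'baz']
row = ['b', 3]  # ragged row with no 'baz' value
assert list(zip_longest(flds, row, fillvalue=None)) == [
    ('foo', 'b'), ('bar', 3), ('baz', None)]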
|
weaktuple.py | """tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
    Because WeakTuples are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
def __init__( self, sequence=() ):
"""Initialize the tuple
The WeakTuple will store weak references to objects
within the sequence.
"""
super( WeakTuple, self).__init__( map( self.wrap, sequence))
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def unwrap( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
|
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
"""Compare the tuple to another (==)"""
return list(self) == sequence
def __ge__( self, sequence ):
"""Compare the tuple to another (>=)"""
return list(self) >= sequence
def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple"""
return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
| return count | conditional_block |
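A concept sketch of the behaviour the WeakTuple docstring describes, using weakref directly: each item is held through weakref.ref (what wrap() produces), and a dead referent is what makes unwrap() raise weakref.ReferenceError:

import gc
import weakref

class Thing(object):
    pass

a, b = Thing(), Thing()
refs = tuple(weakref.ref(x) for x in (a, b))  # what wrap() stores per item
assert refs[0]() is a                         # unwrap(): call the reference

del a
gc.collect()               # belt-and-braces; CPython refcounting collects at once
assert refs[0]() is None   # at this point unwrap() would raise ReferenceError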
weaktuple.py | """tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
    Because WeakTuples are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
def __init__( self, sequence=() ):
"""Initialize the tuple
The WeakTuple will store weak references to objects
within the sequence.
"""
super( WeakTuple, self).__init__( map( self.wrap, sequence))
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def | ( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
return count
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
"""Compare the tuple to another (==)"""
return list(self) == sequence
def __ge__( self, sequence ):
"""Compare the tuple to another (>=)"""
return list(self) >= sequence
def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple"""
return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
| unwrap | identifier_name |
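The wrap() docstring alludes to the callback mechanism behind weakref.WeakKeyDictionary; the underlying primitive is the optional second argument to weakref.ref, invoked when the referent is collected:

import weakref

class Thing(object):
    pass

def on_collect(ref):
    print('referent collected:', ref)

t = Thing()
r = weakref.ref(t, on_collect)  # WeakKeyDictionary self-cleans via this pattern
del t                           # on CPython this fires on_collect immediately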
weaktuple.py | """tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
    Because WeakTuples are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
def __init__( self, sequence=() ):
"""Initialize the tuple
The WeakTuple will store weak references to objects
within the sequence.
"""
super( WeakTuple, self).__init__( map( self.wrap, sequence))
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def unwrap( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
return count
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
"""Compare the tuple to another (==)"""
return list(self) == sequence
def __ge__( self, sequence ):
"""Compare the tuple to another (>=)"""
return list(self) >= sequence
def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple""" | return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__()) | random_line_split |
|
weaktuple.py | """tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
    Because WeakTuples are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
def __init__( self, sequence=() ):
"""Initialize the tuple
The WeakTuple will store weak references to objects
within the sequence.
"""
super( WeakTuple, self).__init__( map( self.wrap, sequence))
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def unwrap( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
return count
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
|
def __ge__( self, sequence ):
"""Compare the tuple to another (>=)"""
return list(self) >= sequence
def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple"""
return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
| """Compare the tuple to another (==)"""
return list(self) == sequence | identifier_body |
Accra.py | '''tzinfo timezone information for Africa/Accra.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Accra(DstTzInfo):
'''Africa/Accra timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Accra'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,1,1,0,0,52),
d(1936,9,1,0,0,0),
d(1936,12,30,23,40,0),
d(1937,9,1,0,0,0),
d(1937,12,30,23,40,0),
d(1938,9,1,0,0,0),
d(1938,12,30,23,40,0),
d(1939,9,1,0,0,0),
d(1939,12,30,23,40,0),
d(1940,9,1,0,0,0),
d(1940,12,30,23,40,0),
d(1941,9,1,0,0,0),
d(1941,12,30,23,40,0),
d(1942,9,1,0,0,0),
d(1942,12,30,23,40,0),
]
_transition_info = [
i(-60,0,'LMT'), | i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
]
Accra = Accra() | i(0,0,'GMT'), | random_line_split |
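A usage sketch for the generated zone class, assuming the pytz-era API; the 20-minute GHST offset corresponds to the i(1200,1200,'GHST') entries above (1200 seconds):

from datetime import datetime
from pytz import timezone

accra = timezone('Africa/Accra')
dt = accra.localize(datetime(1937, 10, 1, 12, 0))  # falls inside a GHST interval
print(dt.tzname(), dt.utcoffset())  # e.g. GHST 0:20:00 with this table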
Accra.py | '''tzinfo timezone information for Africa/Accra.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class | (DstTzInfo):
'''Africa/Accra timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Accra'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,1,1,0,0,52),
d(1936,9,1,0,0,0),
d(1936,12,30,23,40,0),
d(1937,9,1,0,0,0),
d(1937,12,30,23,40,0),
d(1938,9,1,0,0,0),
d(1938,12,30,23,40,0),
d(1939,9,1,0,0,0),
d(1939,12,30,23,40,0),
d(1940,9,1,0,0,0),
d(1940,12,30,23,40,0),
d(1941,9,1,0,0,0),
d(1941,12,30,23,40,0),
d(1942,9,1,0,0,0),
d(1942,12,30,23,40,0),
]
_transition_info = [
i(-60,0,'LMT'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
]
Accra = Accra()
| Accra | identifier_name |
Accra.py | '''tzinfo timezone information for Africa/Accra.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Accra(DstTzInfo):
|
Accra = Accra()
| '''Africa/Accra timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Accra'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,1,1,0,0,52),
d(1936,9,1,0,0,0),
d(1936,12,30,23,40,0),
d(1937,9,1,0,0,0),
d(1937,12,30,23,40,0),
d(1938,9,1,0,0,0),
d(1938,12,30,23,40,0),
d(1939,9,1,0,0,0),
d(1939,12,30,23,40,0),
d(1940,9,1,0,0,0),
d(1940,12,30,23,40,0),
d(1941,9,1,0,0,0),
d(1941,12,30,23,40,0),
d(1942,9,1,0,0,0),
d(1942,12,30,23,40,0),
]
_transition_info = [
i(-60,0,'LMT'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
i(1200,1200,'GHST'),
i(0,0,'GMT'),
] | identifier_body |
models.py | import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate
from django.core.mail import EmailMultiAlternatives
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
class LanguageStoreNotAvailable(Exception):
pass
class NoticeType(models.Model):
label = models.CharField(_('label'), max_length=40)
display = models.CharField(_('display'), max_length=50)
description = models.CharField(_('description'), max_length=100)
# by default only on for media with sensitivity less than or equal to this number
default = models.IntegerField(_('default'))
def __unicode__(self):
return self.label
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2 # email
}
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(User, verbose_name=_('user'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_('send'))
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
try:
return NoticeSetting.objects.get(
user=user, notice_type=notice_type, medium=medium)
except NoticeSetting.DoesNotExist:
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
        # sometimes another thread has already created this
setting, created = NoticeSetting.objects.get_or_create(
user=user, notice_type=notice_type, medium=medium, send=default)
setting.save()
return setting
def should_send(user, notice_type, medium):
if not user.is_active:
return False
return get_notification_setting(user, notice_type, medium).send
class NoticeManager(models.Manager):
def notices_for(self, user, archived=False, unseen=None, on_site=None):
"""
returns Notice objects for the given user.
        If archived=False, it only includes notices not archived.
If archived=True, it returns all notices for that user.
If unseen=None, it includes all notices.
If unseen=True, return only unseen notices.
If unseen=False, return only seen notices.
"""
if archived:
qs = self.filter(user=user)
else:
qs = self.filter(user=user, archived=archived)
if unseen is not None:
qs = qs.filter(unseen=unseen)
if on_site is not None:
qs = qs.filter(on_site=on_site)
return qs
def unseen_count_for(self, user, **kwargs):
"""
returns the number of unseen notices for the given user but does not
mark them seen
"""
return self.notices_for(user, unseen=True, **kwargs).count()
class Notice(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
message = models.TextField(_('message'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
unseen = models.BooleanField(_('unseen'), default=True)
archived = models.BooleanField(_('archived'), default=False)
on_site = models.BooleanField(_('on site'))
objects = NoticeManager()
def __unicode__(self):
return self.message
def archive(self):
self.archived = True
self.save()
def is_unseen(self):
"""
returns value of self.unseen but also changes it to false.
Use this in a template to mark an unseen notice differently the first
time it is shown.
"""
unseen = self.unseen
if unseen:
self.unseen = False
self.save()
return unseen
class Meta:
ordering = ["-added"]
verbose_name = _("notice")
verbose_name_plural = _("notices")
def get_absolute_url(self):
return ("notification_notice", [str(self.pk)])
get_absolute_url = models.permalink(get_absolute_url)
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
    This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = NoticeType.objects.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print "Updated %s NoticeType" % label
except NoticeType.DoesNotExist:
NoticeType(label=label, display=display, description=description, default=default).save()
if verbosity > 1:
print "Created %s NoticeType" % label
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
model = models.get_model(app_label, model_name)
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, 'language'):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
"""
    Returns a dictionary with the format identifier as the key. The values
are fully rendered templates with the given context.
"""
format_templates = {}
for format in formats:
# conditionally turn off autoescaping for .txt extensions in format
if format.endswith(".txt"):
context.autoescape = False
else:
context.autoescape = True
format_templates[format] = render_to_string((
'notification/%s/%s' % (label, format),
'notification/%s' % format), context_instance=context)
return format_templates
def send_now(users, label, extra_context=None, on_site=True):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
notification.send(user, 'friends_invite_sent', {
'spam': 'eggs',
'foo': 'bar',
    })
    You can pass in on_site=False to prevent the emitted notice from being
displayed on the site.
"""
if extra_context is None:
extra_context = {}
notice_type = NoticeType.objects.get(label=label)
current_site = Site.objects.get_current()
notices_url = u"http://%s%s" % (
unicode(current_site),
reverse("notification_notices"),
)
current_language = get_language()
formats = (
'short.txt',
'full.txt',
'notice.html',
'full.html',
'email_full.html',
) # TODO make formats configurable
for user in users:
recipients = []
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
# update context with user specific translations
context = Context({
"user": user,
"notice": ugettext(notice_type.display),
"notices_url": notices_url,
"current_site": current_site,
'MEDIA_URL': settings.MEDIA_URL,
})
context.update(extra_context)
# get prerendered format messages
messages = get_formatted_messages(formats, label, context)
# Strip newlines from subject
subject = ''.join(render_to_string('notification/email_subject.txt', {
'message': messages['short.txt'],
}, context).splitlines())
body = render_to_string('notification/email_body.txt', {
'message': messages['full.txt'],
}, context)
html = render_to_string('notification/email_body.html',{
'message': messages['email_full.html'],
}, context)
#notice = Notice.objects.create(user=user, message=messages['notice.html'], notice_type=notice_type, on_site=on_site)
if should_send(user, notice_type, "1") and user.email \
and user.is_active: # Email
recipients.append(user.email)
msg = EmailMultiAlternatives(subject, body,
settings.DEFAULT_FROM_EMAIL,
recipients)
msg.attach_alternative(html, "text/html")
msg.send()
# reset environment to original language
activate(current_language)
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
def queue(users, label, extra_context=None, on_site=True):
"""
    Queue the notification in NoticeQueueBatch. This allows large numbers
    of user notifications to be deferred to a separate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, on_site))
NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
class ObservedItemManager(models.Manager):
    def all_for(self, observed, signal): | to be sent when a signal is emitted.
"""
content_type = ContentType.objects.get_for_model(observed)
observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
return observed_items
def get_for(self, observed, observer, signal):
content_type = ContentType.objects.get_for_model(observed)
observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
return observed_item
class ObservedItem(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
observed_object = generic.GenericForeignKey('content_type', 'object_id')
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
# the signal that will be listened to send the notice
signal = models.TextField(verbose_name=_('signal'))
objects = ObservedItemManager()
class Meta:
ordering = ['-added']
verbose_name = _('observed item')
verbose_name_plural = _('observed items')
def send_notice(self):
send([self.user], self.notice_type.label,
{'observed': self.observed_object})
def observe(observed, observer, notice_type_label, signal='post_save'):
"""
Create a new ObservedItem.
To be used by applications to register a user as an observer for some object.
"""
notice_type = NoticeType.objects.get(label=notice_type_label)
observed_item = ObservedItem(user=observer, observed_object=observed,
notice_type=notice_type, signal=signal)
observed_item.save()
return observed_item
def stop_observing(observed, observer, signal='post_save'):
"""
Remove an observed item.
"""
observed_item = ObservedItem.objects.get_for(observed, observer, signal)
observed_item.delete()
def send_observation_notices_for(observed, signal='post_save'):
"""
Send a notice for each registered user about an observed object.
"""
observed_items = ObservedItem.objects.all_for(observed, signal)
for observed_item in observed_items:
observed_item.send_notice()
return observed_items
def is_observing(observed, observer, signal='post_save'):
if isinstance(observer, AnonymousUser):
return False
try:
observed_items = ObservedItem.objects.get_for(observed, observer, signal)
return True
except ObservedItem.DoesNotExist:
return False
except ObservedItem.MultipleObjectsReturned:
return True
def handle_observations(sender, instance, *args, **kw):
send_observation_notices_for(instance) | """
Returns all ObservedItems for an observed object, | random_line_split |
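A usage sketch for the opt-in machinery above, assuming the app is installed as notification inside a configured Django project; the label and username are illustrative:

from django.contrib.auth.models import User
from notification.models import NoticeType, create_notice_type, should_send

create_notice_type('friends_invite', 'Invitation Received',
                   'you have received an invitation', default=2)
notice_type = NoticeType.objects.get(label='friends_invite')
user = User.objects.get(username='alice')  # hypothetical existing user

# Medium "1" (email) has sensitivity 2, and 2 <= default here, so the first
# call lazily creates a NoticeSetting with send=True for this user.
print(should_send(user, notice_type, '1'))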
models.py | import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate
from django.core.mail import EmailMultiAlternatives
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
class LanguageStoreNotAvailable(Exception):
pass
class NoticeType(models.Model):
label = models.CharField(_('label'), max_length=40)
display = models.CharField(_('display'), max_length=50)
description = models.CharField(_('description'), max_length=100)
# by default only on for media with sensitivity less than or equal to this number
default = models.IntegerField(_('default'))
def __unicode__(self):
return self.label
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2 # email
}
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(User, verbose_name=_('user'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_('send'))
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
try:
return NoticeSetting.objects.get(
user=user, notice_type=notice_type, medium=medium)
except NoticeSetting.DoesNotExist:
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
        # sometimes another thread has already created this
setting, created = NoticeSetting.objects.get_or_create(
user=user, notice_type=notice_type, medium=medium, send=default)
setting.save()
return setting
def should_send(user, notice_type, medium):
if not user.is_active:
return False
return get_notification_setting(user, notice_type, medium).send
class NoticeManager(models.Manager):
def notices_for(self, user, archived=False, unseen=None, on_site=None):
"""
returns Notice objects for the given user.
        If archived=False, it only includes notices not archived.
If archived=True, it returns all notices for that user.
If unseen=None, it includes all notices.
If unseen=True, return only unseen notices.
If unseen=False, return only seen notices.
"""
if archived:
qs = self.filter(user=user)
else:
qs = self.filter(user=user, archived=archived)
if unseen is not None:
qs = qs.filter(unseen=unseen)
if on_site is not None:
qs = qs.filter(on_site=on_site)
return qs
def unseen_count_for(self, user, **kwargs):
"""
returns the number of unseen notices for the given user but does not
mark them seen
"""
return self.notices_for(user, unseen=True, **kwargs).count()
class Notice(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
message = models.TextField(_('message'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
unseen = models.BooleanField(_('unseen'), default=True)
archived = models.BooleanField(_('archived'), default=False)
on_site = models.BooleanField(_('on site'))
objects = NoticeManager()
def __unicode__(self):
return self.message
def archive(self):
self.archived = True
self.save()
def is_unseen(self):
"""
returns value of self.unseen but also changes it to false.
Use this in a template to mark an unseen notice differently the first
time it is shown.
"""
unseen = self.unseen
if unseen:
self.unseen = False
self.save()
return unseen
class Meta:
ordering = ["-added"]
verbose_name = _("notice")
verbose_name_plural = _("notices")
def get_absolute_url(self):
return ("notification_notice", [str(self.pk)])
get_absolute_url = models.permalink(get_absolute_url)
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
    This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = NoticeType.objects.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print "Updated %s NoticeType" % label
except NoticeType.DoesNotExist:
NoticeType(label=label, display=display, description=description, default=default).save()
if verbosity > 1:
print "Created %s NoticeType" % label
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
model = models.get_model(app_label, model_name)
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, 'language'):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
"""
    Returns a dictionary with the format identifier as the key. The values
are fully rendered templates with the given context.
"""
format_templates = {}
for format in formats:
# conditionally turn off autoescaping for .txt extensions in format
if format.endswith(".txt"):
context.autoescape = False
else:
context.autoescape = True
format_templates[format] = render_to_string((
'notification/%s/%s' % (label, format),
'notification/%s' % format), context_instance=context)
return format_templates
def send_now(users, label, extra_context=None, on_site=True):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
notification.send(user, 'friends_invite_sent', {
'spam': 'eggs',
'foo': 'bar',
    })
    You can pass in on_site=False to prevent the emitted notice from being
displayed on the site.
"""
if extra_context is None:
|
notice_type = NoticeType.objects.get(label=label)
current_site = Site.objects.get_current()
notices_url = u"http://%s%s" % (
unicode(current_site),
reverse("notification_notices"),
)
current_language = get_language()
formats = (
'short.txt',
'full.txt',
'notice.html',
'full.html',
'email_full.html',
) # TODO make formats configurable
for user in users:
recipients = []
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
# update context with user specific translations
context = Context({
"user": user,
"notice": ugettext(notice_type.display),
"notices_url": notices_url,
"current_site": current_site,
'MEDIA_URL': settings.MEDIA_URL,
})
context.update(extra_context)
# get prerendered format messages
messages = get_formatted_messages(formats, label, context)
# Strip newlines from subject
subject = ''.join(render_to_string('notification/email_subject.txt', {
'message': messages['short.txt'],
}, context).splitlines())
body = render_to_string('notification/email_body.txt', {
'message': messages['full.txt'],
}, context)
html = render_to_string('notification/email_body.html',{
'message': messages['email_full.html'],
}, context)
#notice = Notice.objects.create(user=user, message=messages['notice.html'], notice_type=notice_type, on_site=on_site)
if should_send(user, notice_type, "1") and user.email \
and user.is_active: # Email
recipients.append(user.email)
msg = EmailMultiAlternatives(subject, body,
settings.DEFAULT_FROM_EMAIL,
recipients)
msg.attach_alternative(html, "text/html")
msg.send()
# reset environment to original language
activate(current_language)
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
def queue(users, label, extra_context=None, on_site=True):
"""
    Queue the notification in NoticeQueueBatch. This allows large numbers
    of user notifications to be deferred to a separate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, on_site))
NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
class ObservedItemManager(models.Manager):
def all_for(self, observed, signal):
"""
Returns all ObservedItems for an observed object,
        to be sent when a signal is emitted.
"""
content_type = ContentType.objects.get_for_model(observed)
observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
return observed_items
def get_for(self, observed, observer, signal):
content_type = ContentType.objects.get_for_model(observed)
observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
return observed_item
class ObservedItem(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
observed_object = generic.GenericForeignKey('content_type', 'object_id')
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
# the signal that will be listened to send the notice
signal = models.TextField(verbose_name=_('signal'))
objects = ObservedItemManager()
class Meta:
ordering = ['-added']
verbose_name = _('observed item')
verbose_name_plural = _('observed items')
def send_notice(self):
send([self.user], self.notice_type.label,
{'observed': self.observed_object})
def observe(observed, observer, notice_type_label, signal='post_save'):
"""
Create a new ObservedItem.
To be used by applications to register a user as an observer for some object.
"""
notice_type = NoticeType.objects.get(label=notice_type_label)
observed_item = ObservedItem(user=observer, observed_object=observed,
notice_type=notice_type, signal=signal)
observed_item.save()
return observed_item
def stop_observing(observed, observer, signal='post_save'):
"""
Remove an observed item.
"""
observed_item = ObservedItem.objects.get_for(observed, observer, signal)
observed_item.delete()
def send_observation_notices_for(observed, signal='post_save'):
"""
Send a notice for each registered user about an observed object.
"""
observed_items = ObservedItem.objects.all_for(observed, signal)
for observed_item in observed_items:
observed_item.send_notice()
return observed_items
def is_observing(observed, observer, signal='post_save'):
if isinstance(observer, AnonymousUser):
return False
try:
observed_items = ObservedItem.objects.get_for(observed, observer, signal)
return True
except ObservedItem.DoesNotExist:
return False
except ObservedItem.MultipleObjectsReturned:
return True
def handle_observations(sender, instance, *args, **kw):
send_observation_notices_for(instance)
| extra_context = {} | conditional_block |
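A wiring sketch for the observation API above: register an observer, then connect handle_observations so that saving the model emits notices. The model and notice label are hypothetical; the import path assumes the app is installed as notification:

from django.db.models.signals import post_save
from notification.models import observe, handle_observations
from myapp.models import Article  # hypothetical observed model

def watch(article, user):
    # Store an ObservedItem keyed to the 'post_save' signal.
    observe(article, user, 'article_updated', signal='post_save')

post_save.connect(handle_observations, sender=Article)
# Every Article save now runs send_observation_notices_for(instance),
# which calls send_notice() on each registered ObservedItem.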
models.py | import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate
from django.core.mail import EmailMultiAlternatives
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
class LanguageStoreNotAvailable(Exception):
pass
class NoticeType(models.Model):
label = models.CharField(_('label'), max_length=40)
display = models.CharField(_('display'), max_length=50)
description = models.CharField(_('description'), max_length=100)
# by default only on for media with sensitivity less than or equal to this number
default = models.IntegerField(_('default'))
def __unicode__(self):
return self.label
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2 # email
}
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(User, verbose_name=_('user'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_('send'))
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
try:
return NoticeSetting.objects.get(
user=user, notice_type=notice_type, medium=medium)
except NoticeSetting.DoesNotExist:
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
        # sometimes another thread has already created this
setting, created = NoticeSetting.objects.get_or_create(
user=user, notice_type=notice_type, medium=medium, send=default)
setting.save()
return setting
def should_send(user, notice_type, medium):
if not user.is_active:
return False
return get_notification_setting(user, notice_type, medium).send
class NoticeManager(models.Manager):
def notices_for(self, user, archived=False, unseen=None, on_site=None):
"""
returns Notice objects for the given user.
        If archived=False, it only includes notices not archived.
If archived=True, it returns all notices for that user.
If unseen=None, it includes all notices.
If unseen=True, return only unseen notices.
If unseen=False, return only seen notices.
"""
if archived:
qs = self.filter(user=user)
else:
qs = self.filter(user=user, archived=archived)
if unseen is not None:
qs = qs.filter(unseen=unseen)
if on_site is not None:
qs = qs.filter(on_site=on_site)
return qs
def unseen_count_for(self, user, **kwargs):
"""
returns the number of unseen notices for the given user but does not
mark them seen
"""
return self.notices_for(user, unseen=True, **kwargs).count()
class Notice(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
message = models.TextField(_('message'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
unseen = models.BooleanField(_('unseen'), default=True)
archived = models.BooleanField(_('archived'), default=False)
on_site = models.BooleanField(_('on site'))
objects = NoticeManager()
def __unicode__(self):
return self.message
def archive(self):
self.archived = True
self.save()
def | (self):
"""
returns value of self.unseen but also changes it to false.
Use this in a template to mark an unseen notice differently the first
time it is shown.
"""
unseen = self.unseen
if unseen:
self.unseen = False
self.save()
return unseen
class Meta:
ordering = ["-added"]
verbose_name = _("notice")
verbose_name_plural = _("notices")
def get_absolute_url(self):
return ("notification_notice", [str(self.pk)])
get_absolute_url = models.permalink(get_absolute_url)
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
    This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = NoticeType.objects.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print "Updated %s NoticeType" % label
except NoticeType.DoesNotExist:
NoticeType(label=label, display=display, description=description, default=default).save()
if verbosity > 1:
print "Created %s NoticeType" % label
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
model = models.get_model(app_label, model_name)
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, 'language'):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
"""
    Returns a dictionary with the format identifier as the key. The values
    are fully rendered templates with the given context.
"""
format_templates = {}
for format in formats:
# conditionally turn off autoescaping for .txt extensions in format
if format.endswith(".txt"):
context.autoescape = False
else:
context.autoescape = True
format_templates[format] = render_to_string((
'notification/%s/%s' % (label, format),
'notification/%s' % format), context_instance=context)
return format_templates
def send_now(users, label, extra_context=None, on_site=True):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
    notification.send([user], 'friends_invite_sent', {
        'spam': 'eggs',
        'foo': 'bar',
    })
You can pass in on_site=False to prevent the notice emitted from being
displayed on the site.
"""
if extra_context is None:
extra_context = {}
notice_type = NoticeType.objects.get(label=label)
current_site = Site.objects.get_current()
notices_url = u"http://%s%s" % (
unicode(current_site),
reverse("notification_notices"),
)
current_language = get_language()
formats = (
'short.txt',
'full.txt',
'notice.html',
'full.html',
'email_full.html',
) # TODO make formats configurable
for user in users:
recipients = []
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
# update context with user specific translations
context = Context({
"user": user,
"notice": ugettext(notice_type.display),
"notices_url": notices_url,
"current_site": current_site,
'MEDIA_URL': settings.MEDIA_URL,
})
context.update(extra_context)
# get prerendered format messages
messages = get_formatted_messages(formats, label, context)
# Strip newlines from subject
subject = ''.join(render_to_string('notification/email_subject.txt', {
'message': messages['short.txt'],
}, context).splitlines())
body = render_to_string('notification/email_body.txt', {
'message': messages['full.txt'],
}, context)
html = render_to_string('notification/email_body.html',{
'message': messages['email_full.html'],
}, context)
#notice = Notice.objects.create(user=user, message=messages['notice.html'], notice_type=notice_type, on_site=on_site)
if should_send(user, notice_type, "1") and user.email \
and user.is_active: # Email
recipients.append(user.email)
msg = EmailMultiAlternatives(subject, body,
settings.DEFAULT_FROM_EMAIL,
recipients)
msg.attach_alternative(html, "text/html")
msg.send()
# reset environment to original language
activate(current_language)
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
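# Illustrative only -- the three dispatch modes of send(); "friends_invite"
# is a hypothetical NoticeType label:
#
#   send([user], "friends_invite", {"from_user": other})             # honors NOTIFICATION_QUEUE_ALL
#   send([user], "friends_invite", {"from_user": other}, now=True)   # always immediate
#   send([user], "friends_invite", {"from_user": other}, queue=True) # always deferred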
def queue(users, label, extra_context=None, on_site=True):
"""
Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, on_site))
NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
class ObservedItemManager(models.Manager):
def all_for(self, observed, signal):
"""
Returns all ObservedItems for an observed object,
        to be sent when a signal is emitted.
"""
content_type = ContentType.objects.get_for_model(observed)
observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
return observed_items
def get_for(self, observed, observer, signal):
content_type = ContentType.objects.get_for_model(observed)
observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
return observed_item
class ObservedItem(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
observed_object = generic.GenericForeignKey('content_type', 'object_id')
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
# the signal that will be listened to send the notice
signal = models.TextField(verbose_name=_('signal'))
objects = ObservedItemManager()
class Meta:
ordering = ['-added']
verbose_name = _('observed item')
verbose_name_plural = _('observed items')
def send_notice(self):
send([self.user], self.notice_type.label,
{'observed': self.observed_object})
def observe(observed, observer, notice_type_label, signal='post_save'):
"""
Create a new ObservedItem.
To be used by applications to register a user as an observer for some object.
"""
notice_type = NoticeType.objects.get(label=notice_type_label)
observed_item = ObservedItem(user=observer, observed_object=observed,
notice_type=notice_type, signal=signal)
observed_item.save()
return observed_item
def stop_observing(observed, observer, signal='post_save'):
"""
Remove an observed item.
"""
observed_item = ObservedItem.objects.get_for(observed, observer, signal)
observed_item.delete()
def send_observation_notices_for(observed, signal='post_save'):
"""
Send a notice for each registered user about an observed object.
"""
observed_items = ObservedItem.objects.all_for(observed, signal)
for observed_item in observed_items:
observed_item.send_notice()
return observed_items
def is_observing(observed, observer, signal='post_save'):
if isinstance(observer, AnonymousUser):
return False
try:
observed_items = ObservedItem.objects.get_for(observed, observer, signal)
return True
except ObservedItem.DoesNotExist:
return False
except ObservedItem.MultipleObjectsReturned:
return True
def handle_observations(sender, instance, *args, **kw):
send_observation_notices_for(instance)
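# Hedged wiring sketch (not part of this module): connect handle_observations
# to a model's post_save signal; `BlogPost` is a hypothetical observed model.
#
#   from django.db.models.signals import post_save
#   post_save.connect(handle_observations, sender=BlogPost)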
| is_unseen | identifier_name |
models.py | import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate
from django.core.mail import EmailMultiAlternatives
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
class LanguageStoreNotAvailable(Exception):
pass
class NoticeType(models.Model):
label = models.CharField(_('label'), max_length=40)
display = models.CharField(_('display'), max_length=50)
description = models.CharField(_('description'), max_length=100)
# by default only on for media with sensitivity less than or equal to this number
default = models.IntegerField(_('default'))
def __unicode__(self):
|
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2 # email
}
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(User, verbose_name=_('user'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_('send'))
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
try:
return NoticeSetting.objects.get(
user=user, notice_type=notice_type, medium=medium)
except NoticeSetting.DoesNotExist:
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
# sometimes other thread already created this
setting, created = NoticeSetting.objects.get_or_create(
user=user, notice_type=notice_type, medium=medium, send=default)
setting.save()
return setting
def should_send(user, notice_type, medium):
if not user.is_active:
return False
return get_notification_setting(user, notice_type, medium).send
class NoticeManager(models.Manager):
def notices_for(self, user, archived=False, unseen=None, on_site=None):
"""
returns Notice objects for the given user.
        If archived=False, it only includes notices that are not archived.
If archived=True, it returns all notices for that user.
If unseen=None, it includes all notices.
If unseen=True, return only unseen notices.
If unseen=False, return only seen notices.
"""
if archived:
qs = self.filter(user=user)
else:
qs = self.filter(user=user, archived=archived)
if unseen is not None:
qs = qs.filter(unseen=unseen)
if on_site is not None:
qs = qs.filter(on_site=on_site)
return qs
def unseen_count_for(self, user, **kwargs):
"""
returns the number of unseen notices for the given user but does not
mark them seen
"""
return self.notices_for(user, unseen=True, **kwargs).count()
class Notice(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
message = models.TextField(_('message'))
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
unseen = models.BooleanField(_('unseen'), default=True)
archived = models.BooleanField(_('archived'), default=False)
on_site = models.BooleanField(_('on site'))
objects = NoticeManager()
def __unicode__(self):
return self.message
def archive(self):
self.archived = True
self.save()
def is_unseen(self):
"""
returns value of self.unseen but also changes it to false.
Use this in a template to mark an unseen notice differently the first
time it is shown.
"""
unseen = self.unseen
if unseen:
self.unseen = False
self.save()
return unseen
class Meta:
ordering = ["-added"]
verbose_name = _("notice")
verbose_name_plural = _("notices")
def get_absolute_url(self):
return ("notification_notice", [str(self.pk)])
get_absolute_url = models.permalink(get_absolute_url)
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
    This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = NoticeType.objects.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print "Updated %s NoticeType" % label
except NoticeType.DoesNotExist:
NoticeType(label=label, display=display, description=description, default=default).save()
if verbosity > 1:
print "Created %s NoticeType" % label
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
model = models.get_model(app_label, model_name)
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, 'language'):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
"""
    Returns a dictionary with the format identifier as the key. The values
    are fully rendered templates with the given context.
"""
format_templates = {}
for format in formats:
# conditionally turn off autoescaping for .txt extensions in format
if format.endswith(".txt"):
context.autoescape = False
else:
context.autoescape = True
format_templates[format] = render_to_string((
'notification/%s/%s' % (label, format),
'notification/%s' % format), context_instance=context)
return format_templates
def send_now(users, label, extra_context=None, on_site=True):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
    notification.send([user], 'friends_invite_sent', {
        'spam': 'eggs',
        'foo': 'bar',
    })
You can pass in on_site=False to prevent the notice emitted from being
displayed on the site.
"""
if extra_context is None:
extra_context = {}
notice_type = NoticeType.objects.get(label=label)
current_site = Site.objects.get_current()
notices_url = u"http://%s%s" % (
unicode(current_site),
reverse("notification_notices"),
)
current_language = get_language()
formats = (
'short.txt',
'full.txt',
'notice.html',
'full.html',
'email_full.html',
) # TODO make formats configurable
for user in users:
recipients = []
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
# update context with user specific translations
context = Context({
"user": user,
"notice": ugettext(notice_type.display),
"notices_url": notices_url,
"current_site": current_site,
'MEDIA_URL': settings.MEDIA_URL,
})
context.update(extra_context)
# get prerendered format messages
messages = get_formatted_messages(formats, label, context)
# Strip newlines from subject
subject = ''.join(render_to_string('notification/email_subject.txt', {
'message': messages['short.txt'],
}, context).splitlines())
body = render_to_string('notification/email_body.txt', {
'message': messages['full.txt'],
}, context)
html = render_to_string('notification/email_body.html',{
'message': messages['email_full.html'],
}, context)
#notice = Notice.objects.create(user=user, message=messages['notice.html'], notice_type=notice_type, on_site=on_site)
if should_send(user, notice_type, "1") and user.email \
and user.is_active: # Email
recipients.append(user.email)
msg = EmailMultiAlternatives(subject, body,
settings.DEFAULT_FROM_EMAIL,
recipients)
msg.attach_alternative(html, "text/html")
msg.send()
# reset environment to original language
activate(current_language)
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
def queue(users, label, extra_context=None, on_site=True):
"""
Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, on_site))
NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
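# A minimal draining sketch mirroring the encoding above; a worker (such as
# an emit_notices management command) would do something like this.
# Illustrative only, not the actual command:
#
#   for batch in NoticeQueueBatch.objects.all():
#       notices = pickle.loads(str(batch.pickled_data).decode("base64"))
#       for user_pk, label, extra_context, on_site in notices:
#           send_now([User.objects.get(pk=user_pk)], label, extra_context, on_site)
#       batch.delete()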
class ObservedItemManager(models.Manager):
def all_for(self, observed, signal):
"""
Returns all ObservedItems for an observed object,
        to be sent when a signal is emitted.
"""
content_type = ContentType.objects.get_for_model(observed)
observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
return observed_items
def get_for(self, observed, observer, signal):
content_type = ContentType.objects.get_for_model(observed)
observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
return observed_item
class ObservedItem(models.Model):
user = models.ForeignKey(User, verbose_name=_('user'))
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
observed_object = generic.GenericForeignKey('content_type', 'object_id')
notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
added = models.DateTimeField(_('added'), default=datetime.datetime.now)
# the signal that will be listened to send the notice
signal = models.TextField(verbose_name=_('signal'))
objects = ObservedItemManager()
class Meta:
ordering = ['-added']
verbose_name = _('observed item')
verbose_name_plural = _('observed items')
def send_notice(self):
send([self.user], self.notice_type.label,
{'observed': self.observed_object})
def observe(observed, observer, notice_type_label, signal='post_save'):
"""
Create a new ObservedItem.
To be used by applications to register a user as an observer for some object.
"""
notice_type = NoticeType.objects.get(label=notice_type_label)
observed_item = ObservedItem(user=observer, observed_object=observed,
notice_type=notice_type, signal=signal)
observed_item.save()
return observed_item
def stop_observing(observed, observer, signal='post_save'):
"""
Remove an observed item.
"""
observed_item = ObservedItem.objects.get_for(observed, observer, signal)
observed_item.delete()
def send_observation_notices_for(observed, signal='post_save'):
"""
Send a notice for each registered user about an observed object.
"""
observed_items = ObservedItem.objects.all_for(observed, signal)
for observed_item in observed_items:
observed_item.send_notice()
return observed_items
def is_observing(observed, observer, signal='post_save'):
if isinstance(observer, AnonymousUser):
return False
try:
observed_items = ObservedItem.objects.get_for(observed, observer, signal)
return True
except ObservedItem.DoesNotExist:
return False
except ObservedItem.MultipleObjectsReturned:
return True
def handle_observations(sender, instance, *args, **kw):
send_observation_notices_for(instance)
| return self.label | identifier_body |
modules.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use ethcore::client::BlockChainClient;
use hypervisor::Hypervisor;
use ethsync::{SyncConfig, NetworkConfiguration, NetworkError};
use ethcore::snapshot::SnapshotService;
#[cfg(not(feature="ipc"))]
use self::no_ipc_deps::*;
#[cfg(feature="ipc")]
use self::ipc_deps::*;
use ethcore_logger::Config as LogConfig;
use std::path::Path;
#[cfg(feature="ipc")]
pub mod service_urls {
use std::path::PathBuf;
pub const CLIENT: &'static str = "parity-chain.ipc";
pub const SNAPSHOT: &'static str = "parity-snapshot.ipc";
pub const SYNC: &'static str = "parity-sync.ipc";
pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc";
pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc";
pub const SYNC_CONTROL: &'static str = "parity-sync-control.ipc";
#[cfg(feature="stratum")]
pub const STRATUM: &'static str = "parity-stratum.ipc";
#[cfg(feature="stratum")]
pub const MINING_JOB_DISPATCHER: &'static str = "parity-mining-jobs.ipc";
pub fn with_base(data_dir: &str, service_path: &str) -> String {
let mut path = PathBuf::from(data_dir);
path.push(service_path);
format!("ipc://{}", path.to_str().unwrap())
}
}
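// Illustrative only: with a data dir of "/tmp/parity",
// `service_urls::with_base("/tmp/parity", service_urls::CLIENT)` yields
// "ipc:///tmp/parity/parity-chain.ipc".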
#[cfg(not(feature="ipc"))]
mod no_ipc_deps {
pub use ethsync::{EthSync, SyncProvider, ManageNetwork};
pub use ethcore::client::ChainNotify;
}
#[cfg(feature="ipc")]
pub type SyncModules = (
GuardedSocket<SyncClient<NanoSocket>>,
GuardedSocket<NetworkManagerClient<NanoSocket>>,
GuardedSocket<ChainNotifyClient<NanoSocket>>
);
#[cfg(not(feature="ipc"))]
pub type SyncModules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
#[cfg(feature="ipc")]
mod ipc_deps {
pub use ethsync::remote::{SyncClient, NetworkManagerClient};
pub use ethsync::ServiceConfiguration;
pub use ethcore::client::remote::ChainNotifyClient;
pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL};
pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client};
pub use ipc::IpcSocket;
pub use ipc::binary::serialize;
}
#[cfg(feature="ipc")]
pub fn hypervisor(base_path: &Path) -> Option<Hypervisor> {
Some(Hypervisor
::with_url(&service_urls::with_base(base_path.to_str().unwrap(), HYPERVISOR_IPC_URL))
.io_path(base_path.to_str().unwrap()))
}
#[cfg(not(feature="ipc"))]
pub fn hypervisor(_: &Path) -> Option<Hypervisor> {
None
}
#[cfg(feature="ipc")]
fn | (io_path: &str, sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs {
let service_config = ServiceConfiguration {
sync: sync_cfg,
net: net_cfg,
io_path: io_path.to_owned(),
};
// initialisation payload is passed via stdin
let service_payload = serialize(&service_config).expect("Any binary-derived struct is serializable by definition");
// client service url and logging settings are passed in command line
let mut cli_args = Vec::new();
cli_args.push("sync".to_owned());
if !log_settings.color { cli_args.push("--no-color".to_owned()); }
if let Some(ref mode) = log_settings.mode {
cli_args.push("-l".to_owned());
cli_args.push(mode.to_owned());
}
if let Some(ref file) = log_settings.file {
cli_args.push("--log-file".to_owned());
cli_args.push(file.to_owned());
}
BootArgs::new().stdin(service_payload).cli(cli_args)
}
#[cfg(feature="ipc")]
pub fn sync
(
hypervisor_ref: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
_client: Arc<BlockChainClient>,
_snapshot_service: Arc<SnapshotService>,
log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration");
let args = sync_arguments(&hypervisor.io_path, sync_cfg, net_cfg, log_settings);
hypervisor = hypervisor.module(SYNC_MODULE_ID, args);
hypervisor.start();
hypervisor.wait_for_startup();
let sync_client = generic_client::<SyncClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap();
let notify_client = generic_client::<ChainNotifyClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap();
let manage_client = generic_client::<NetworkManagerClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap();
*hypervisor_ref = Some(hypervisor);
Ok((sync_client, manage_client, notify_client))
}
#[cfg(not(feature="ipc"))]
pub fn sync
(
_hypervisor: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
client: Arc<BlockChainClient>,
snapshot_service: Arc<SnapshotService>,
_log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let eth_sync = try!(EthSync::new(sync_cfg, client, snapshot_service, net_cfg));
Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
}
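// Hedged caller-side sketch (assumed variable names; not part of this
// module): boot an optional hypervisor for the base path, then request the
// sync modules.
//
//   let mut hv = hypervisor(Path::new(&base_path));
//   let (sync_provider, manage_network, chain_notify) =
//       try!(sync(&mut hv, sync_cfg, net_cfg, client, snapshot_service, &log_settings));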
| sync_arguments | identifier_name |
modules.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use ethcore::client::BlockChainClient;
use hypervisor::Hypervisor;
use ethsync::{SyncConfig, NetworkConfiguration, NetworkError};
use ethcore::snapshot::SnapshotService;
#[cfg(not(feature="ipc"))]
use self::no_ipc_deps::*;
#[cfg(feature="ipc")]
use self::ipc_deps::*;
use ethcore_logger::Config as LogConfig;
use std::path::Path;
#[cfg(feature="ipc")]
pub mod service_urls {
use std::path::PathBuf;
pub const CLIENT: &'static str = "parity-chain.ipc";
pub const SNAPSHOT: &'static str = "parity-snapshot.ipc";
pub const SYNC: &'static str = "parity-sync.ipc";
pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc";
pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc";
pub const SYNC_CONTROL: &'static str = "parity-sync-control.ipc";
#[cfg(feature="stratum")]
pub const STRATUM: &'static str = "parity-stratum.ipc";
#[cfg(feature="stratum")]
pub const MINING_JOB_DISPATCHER: &'static str = "parity-mining-jobs.ipc";
pub fn with_base(data_dir: &str, service_path: &str) -> String {
let mut path = PathBuf::from(data_dir);
path.push(service_path);
format!("ipc://{}", path.to_str().unwrap())
}
}
#[cfg(not(feature="ipc"))]
mod no_ipc_deps {
pub use ethsync::{EthSync, SyncProvider, ManageNetwork};
pub use ethcore::client::ChainNotify;
}
#[cfg(feature="ipc")]
pub type SyncModules = (
GuardedSocket<SyncClient<NanoSocket>>,
GuardedSocket<NetworkManagerClient<NanoSocket>>,
GuardedSocket<ChainNotifyClient<NanoSocket>>
);
#[cfg(not(feature="ipc"))]
pub type SyncModules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
#[cfg(feature="ipc")]
mod ipc_deps {
pub use ethsync::remote::{SyncClient, NetworkManagerClient};
pub use ethsync::ServiceConfiguration;
pub use ethcore::client::remote::ChainNotifyClient;
pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL};
pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client};
pub use ipc::IpcSocket;
pub use ipc::binary::serialize;
}
#[cfg(feature="ipc")]
pub fn hypervisor(base_path: &Path) -> Option<Hypervisor> {
Some(Hypervisor
::with_url(&service_urls::with_base(base_path.to_str().unwrap(), HYPERVISOR_IPC_URL))
.io_path(base_path.to_str().unwrap()))
}
#[cfg(not(feature="ipc"))]
pub fn hypervisor(_: &Path) -> Option<Hypervisor> |
#[cfg(feature="ipc")]
fn sync_arguments(io_path: &str, sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs {
let service_config = ServiceConfiguration {
sync: sync_cfg,
net: net_cfg,
io_path: io_path.to_owned(),
};
// initialisation payload is passed via stdin
let service_payload = serialize(&service_config).expect("Any binary-derived struct is serializable by definition");
// client service url and logging settings are passed in command line
let mut cli_args = Vec::new();
cli_args.push("sync".to_owned());
if !log_settings.color { cli_args.push("--no-color".to_owned()); }
if let Some(ref mode) = log_settings.mode {
cli_args.push("-l".to_owned());
cli_args.push(mode.to_owned());
}
if let Some(ref file) = log_settings.file {
cli_args.push("--log-file".to_owned());
cli_args.push(file.to_owned());
}
BootArgs::new().stdin(service_payload).cli(cli_args)
}
#[cfg(feature="ipc")]
pub fn sync
(
hypervisor_ref: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
_client: Arc<BlockChainClient>,
_snapshot_service: Arc<SnapshotService>,
log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration");
let args = sync_arguments(&hypervisor.io_path, sync_cfg, net_cfg, log_settings);
hypervisor = hypervisor.module(SYNC_MODULE_ID, args);
hypervisor.start();
hypervisor.wait_for_startup();
let sync_client = generic_client::<SyncClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap();
let notify_client = generic_client::<ChainNotifyClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap();
let manage_client = generic_client::<NetworkManagerClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap();
*hypervisor_ref = Some(hypervisor);
Ok((sync_client, manage_client, notify_client))
}
#[cfg(not(feature="ipc"))]
pub fn sync
(
_hypervisor: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
client: Arc<BlockChainClient>,
snapshot_service: Arc<SnapshotService>,
_log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let eth_sync = try!(EthSync::new(sync_cfg, client, snapshot_service, net_cfg));
Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
}
| {
None
} | identifier_body |
modules.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use ethcore::client::BlockChainClient;
use hypervisor::Hypervisor;
use ethsync::{SyncConfig, NetworkConfiguration, NetworkError};
use ethcore::snapshot::SnapshotService;
#[cfg(not(feature="ipc"))]
use self::no_ipc_deps::*;
#[cfg(feature="ipc")]
use self::ipc_deps::*;
use ethcore_logger::Config as LogConfig;
use std::path::Path;
#[cfg(feature="ipc")]
pub mod service_urls {
use std::path::PathBuf;
pub const CLIENT: &'static str = "parity-chain.ipc";
pub const SNAPSHOT: &'static str = "parity-snapshot.ipc";
pub const SYNC: &'static str = "parity-sync.ipc";
pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc";
pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc";
pub const SYNC_CONTROL: &'static str = "parity-sync-control.ipc";
#[cfg(feature="stratum")]
pub const STRATUM: &'static str = "parity-stratum.ipc";
#[cfg(feature="stratum")]
pub const MINING_JOB_DISPATCHER: &'static str = "parity-mining-jobs.ipc";
pub fn with_base(data_dir: &str, service_path: &str) -> String {
let mut path = PathBuf::from(data_dir);
path.push(service_path);
format!("ipc://{}", path.to_str().unwrap())
}
}
#[cfg(not(feature="ipc"))]
mod no_ipc_deps {
pub use ethsync::{EthSync, SyncProvider, ManageNetwork};
pub use ethcore::client::ChainNotify;
}
#[cfg(feature="ipc")]
pub type SyncModules = (
GuardedSocket<SyncClient<NanoSocket>>,
GuardedSocket<NetworkManagerClient<NanoSocket>>,
GuardedSocket<ChainNotifyClient<NanoSocket>>
);
#[cfg(not(feature="ipc"))]
pub type SyncModules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
#[cfg(feature="ipc")]
mod ipc_deps {
pub use ethsync::remote::{SyncClient, NetworkManagerClient};
pub use ethsync::ServiceConfiguration;
pub use ethcore::client::remote::ChainNotifyClient;
pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL};
pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client}; | pub use ipc::IpcSocket;
pub use ipc::binary::serialize;
}
#[cfg(feature="ipc")]
pub fn hypervisor(base_path: &Path) -> Option<Hypervisor> {
Some(Hypervisor
::with_url(&service_urls::with_base(base_path.to_str().unwrap(), HYPERVISOR_IPC_URL))
.io_path(base_path.to_str().unwrap()))
}
#[cfg(not(feature="ipc"))]
pub fn hypervisor(_: &Path) -> Option<Hypervisor> {
None
}
#[cfg(feature="ipc")]
fn sync_arguments(io_path: &str, sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs {
let service_config = ServiceConfiguration {
sync: sync_cfg,
net: net_cfg,
io_path: io_path.to_owned(),
};
// initialisation payload is passed via stdin
let service_payload = serialize(&service_config).expect("Any binary-derived struct is serializable by definition");
// client service url and logging settings are passed in command line
let mut cli_args = Vec::new();
cli_args.push("sync".to_owned());
if !log_settings.color { cli_args.push("--no-color".to_owned()); }
if let Some(ref mode) = log_settings.mode {
cli_args.push("-l".to_owned());
cli_args.push(mode.to_owned());
}
if let Some(ref file) = log_settings.file {
cli_args.push("--log-file".to_owned());
cli_args.push(file.to_owned());
}
BootArgs::new().stdin(service_payload).cli(cli_args)
}
#[cfg(feature="ipc")]
pub fn sync
(
hypervisor_ref: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
_client: Arc<BlockChainClient>,
_snapshot_service: Arc<SnapshotService>,
log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration");
let args = sync_arguments(&hypervisor.io_path, sync_cfg, net_cfg, log_settings);
hypervisor = hypervisor.module(SYNC_MODULE_ID, args);
hypervisor.start();
hypervisor.wait_for_startup();
let sync_client = generic_client::<SyncClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap();
let notify_client = generic_client::<ChainNotifyClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap();
let manage_client = generic_client::<NetworkManagerClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap();
*hypervisor_ref = Some(hypervisor);
Ok((sync_client, manage_client, notify_client))
}
#[cfg(not(feature="ipc"))]
pub fn sync
(
_hypervisor: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
client: Arc<BlockChainClient>,
snapshot_service: Arc<SnapshotService>,
_log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let eth_sync = try!(EthSync::new(sync_cfg, client, snapshot_service, net_cfg));
Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
} | random_line_split |
|
modules.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use ethcore::client::BlockChainClient;
use hypervisor::Hypervisor;
use ethsync::{SyncConfig, NetworkConfiguration, NetworkError};
use ethcore::snapshot::SnapshotService;
#[cfg(not(feature="ipc"))]
use self::no_ipc_deps::*;
#[cfg(feature="ipc")]
use self::ipc_deps::*;
use ethcore_logger::Config as LogConfig;
use std::path::Path;
#[cfg(feature="ipc")]
pub mod service_urls {
use std::path::PathBuf;
pub const CLIENT: &'static str = "parity-chain.ipc";
pub const SNAPSHOT: &'static str = "parity-snapshot.ipc";
pub const SYNC: &'static str = "parity-sync.ipc";
pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc";
pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc";
pub const SYNC_CONTROL: &'static str = "parity-sync-control.ipc";
#[cfg(feature="stratum")]
pub const STRATUM: &'static str = "parity-stratum.ipc";
#[cfg(feature="stratum")]
pub const MINING_JOB_DISPATCHER: &'static str = "parity-mining-jobs.ipc";
pub fn with_base(data_dir: &str, service_path: &str) -> String {
let mut path = PathBuf::from(data_dir);
path.push(service_path);
format!("ipc://{}", path.to_str().unwrap())
}
}
#[cfg(not(feature="ipc"))]
mod no_ipc_deps {
pub use ethsync::{EthSync, SyncProvider, ManageNetwork};
pub use ethcore::client::ChainNotify;
}
#[cfg(feature="ipc")]
pub type SyncModules = (
GuardedSocket<SyncClient<NanoSocket>>,
GuardedSocket<NetworkManagerClient<NanoSocket>>,
GuardedSocket<ChainNotifyClient<NanoSocket>>
);
#[cfg(not(feature="ipc"))]
pub type SyncModules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
#[cfg(feature="ipc")]
mod ipc_deps {
pub use ethsync::remote::{SyncClient, NetworkManagerClient};
pub use ethsync::ServiceConfiguration;
pub use ethcore::client::remote::ChainNotifyClient;
pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL};
pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client};
pub use ipc::IpcSocket;
pub use ipc::binary::serialize;
}
#[cfg(feature="ipc")]
pub fn hypervisor(base_path: &Path) -> Option<Hypervisor> {
Some(Hypervisor
::with_url(&service_urls::with_base(base_path.to_str().unwrap(), HYPERVISOR_IPC_URL))
.io_path(base_path.to_str().unwrap()))
}
#[cfg(not(feature="ipc"))]
pub fn hypervisor(_: &Path) -> Option<Hypervisor> {
None
}
#[cfg(feature="ipc")]
fn sync_arguments(io_path: &str, sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs {
let service_config = ServiceConfiguration {
sync: sync_cfg,
net: net_cfg,
io_path: io_path.to_owned(),
};
// initialisation payload is passed via stdin
let service_payload = serialize(&service_config).expect("Any binary-derived struct is serializable by definition");
// client service url and logging settings are passed in command line
let mut cli_args = Vec::new();
cli_args.push("sync".to_owned());
if !log_settings.color { cli_args.push("--no-color".to_owned()); }
if let Some(ref mode) = log_settings.mode {
cli_args.push("-l".to_owned());
cli_args.push(mode.to_owned());
}
if let Some(ref file) = log_settings.file |
BootArgs::new().stdin(service_payload).cli(cli_args)
}
#[cfg(feature="ipc")]
pub fn sync
(
hypervisor_ref: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
_client: Arc<BlockChainClient>,
_snapshot_service: Arc<SnapshotService>,
log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration");
let args = sync_arguments(&hypervisor.io_path, sync_cfg, net_cfg, log_settings);
hypervisor = hypervisor.module(SYNC_MODULE_ID, args);
hypervisor.start();
hypervisor.wait_for_startup();
let sync_client = generic_client::<SyncClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap();
let notify_client = generic_client::<ChainNotifyClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap();
let manage_client = generic_client::<NetworkManagerClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap();
*hypervisor_ref = Some(hypervisor);
Ok((sync_client, manage_client, notify_client))
}
#[cfg(not(feature="ipc"))]
pub fn sync
(
_hypervisor: &mut Option<Hypervisor>,
sync_cfg: SyncConfig,
net_cfg: NetworkConfiguration,
client: Arc<BlockChainClient>,
snapshot_service: Arc<SnapshotService>,
_log_settings: &LogConfig,
)
-> Result<SyncModules, NetworkError>
{
let eth_sync = try!(EthSync::new(sync_cfg, client, snapshot_service, net_cfg));
Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
}
| {
cli_args.push("--log-file".to_owned());
cli_args.push(file.to_owned());
} | conditional_block |