| Column | Type | Stats |
|---|---|---|
| file_name | large_string | lengths 4 to 140 |
| prefix | large_string | lengths 0 to 12.1k |
| suffix | large_string | lengths 0 to 12k |
| middle | large_string | lengths 0 to 7.51k |
| fim_type | large_string | 4 classes |
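Each row below is one fill-in-the-middle (FIM) training example: a source file is split into a prefix, a masked middle, and a suffix, and fim_type records what kind of span was masked (identifier_name, identifier_body, conditional_block, or random_line_split). The sketch below shows how such a row can be consumed; the `<fim_*>` sentinel tokens are illustrative assumptions, not values from this dataset.

```javascript
// Reassembling one dataset row. The sentinel tokens are placeholders;
// real FIM training uses whatever sentinels the target tokenizer defines.
function buildFimExample({ prefix, middle, suffix }) {
  // PSM ordering: the model is shown prefix and suffix, then must produce
  // the masked middle.
  return `<fim_prefix>${prefix}<fim_suffix>${suffix}<fim_middle>${middle}`;
}

// Concatenating the three columns in file order recovers the original file.
function reassembleFile({ prefix, middle, suffix }) {
  return prefix + middle + suffix;
}
```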
elasticsearch_adapter.js

constructor (config) {
if (typeof config !== 'object' || Object.keys(config).length === 0) {
throw new NexxusError(NexxusError.errors.ServerFailure, ['supplied empty or invalid configuration parameter']);
}
let esConfig = {
maxRetries: 10,
deadTimeout: 1e4,
pingTimeout: 3000,
keepAlive: true,
maxSockets: 300,
createNodeAgent (connection, config) {
return new AgentKeepAlive(connection.makeAgentConfig(config));
}
};
if (config.hosts) {
esConfig.hosts = config.hosts;
} else if (config.host) {
esConfig.host = config.host;
esConfig.sniffOnStart = true;
esConfig.sniffInterval = 30000;
esConfig.sniffOnConnectionFault = true;
}
const esApi = elasticsearch.Client.apis._default;
const disconnectFunctionHandler = e => {
if (e.message === 'No Living connections' && !this.reconnecting) {
this.reconnecting = true;
Services.logger.emergency(`Lost connection to elasticsearch: ${e.message}`);
setTimeout(() => {
this[tryConnectionMethod]();
}, 2000);
this.emit('disconnect');
}
throw e;
};
for (const func in esApi) {
if (esApi[func] && esApi[func].name === 'action') {
esApi[func] = new Proxy(esApi[func], {
apply: (target, ctx, args) => {
// This replaces the original callback.
// When something bad happens, normal operations are disrupted, so we also emit
// a 'disconnect' event so the application knows something went wrong.
const lastArg = args.pop();
// the ES library also supports both callbacks and promises
if (lastArg instanceof Function) {
args.push((err, res) => {
if (err) {
if (err.message.startsWith('Request Timeout')) {
this.connected = false;
this.reconnecting = true;
return lastArg(err);
}
return disconnectFunctionHandler(err);
}
return lastArg(null, res);
});
return Reflect.apply(target, ctx, args);
}
args.push(lastArg);
return Reflect.apply(target, ctx, args).catch(disconnectFunctionHandler);
}
});
}
}
super(new elasticsearch.Client(esConfig));
this.config = config;
this.config.subscribe_limit = this.config.subscribe_limit || 64;
this.config.get_limit = this.config.get_limit || 384;
this.connected = false;
this.reconnecting = false;
this[tryConnectionMethod]();
}
[tryConnectionMethod] () {
let error = false;
async.doWhilst(callback => {
this.connection.ping({}, (err, res) => {
if (!err) {
Services.logger.info('Connected to ElasticSearch MainDatabase');
this.connected = true;
return setImmediate(callback);
}
if (err.message === 'No Living connections') {
Services.logger.error(`Failed connecting to Elasticsearch "${this.config.host || this.config.hosts.join(', ')}": ${err.message}. Retrying...`);
setTimeout(callback, 2000);
} else if (err.message.startsWith('Request Timeout')) {
Services.logger.error(`Failed connecting to Elasticsearch "${this.config.host || this.config.hosts.join(', ')}": ${err.message}. Retrying...`);
setTimeout(callback, 2000);
} else {
error = err;
Services.logger.emergency(`Connection to ElasticSearch failed: ${err.message}`);
setImmediate(callback);
}
return null;
});
}, () => this.connected === false && error === false, () => {
if (error) {
this.emit('error', error);
} else {
if (this.reconnecting === true) {
this.emit('reconnected');
} else {
this.emit('ready');
}
this.reconnecting = false;
}
});
}
async [processSchemaModificationMethod] (applicationId, modifications) {
if (modifications.added.schema) {
const addedModels = Object.keys(modifications.added.schema);
await addedModels.reduce(async (promise, modelName) => {
await promise;
try {
await this.connection.indices.create({
index: `${constants.CHANNEL_KEY_PREFIX}-${applicationId}-${modelName}`
});
Services.logger.debug(`Successfully created index: "${constants.CHANNEL_KEY_PREFIX}-${applicationId}-${modelName}"`);
} catch (err) {
Services.logger.warning(`Index already exists: "${constants.CHANNEL_KEY_PREFIX}-${applicationId}-${modelName}"`);
}
return Promise.resolve();
}, Promise.resolve());
}
if (modifications.deleted.schema) {
const removedModels = Object.keys(modifications.deleted.schema);
const indicesToRemove = removedModels.map(modelName => `${constants.CHANNEL_KEY_PREFIX}-${applicationId}-${modelName}`);
try {
await this.connection.indices.delete({
index: indicesToRemove
});
Services.logger.debug(`Successfully removed indices: "${indicesToRemove}"`);
} catch (err) {
Services.logger.warning(`Error when trying to remove indices: ${err}`);
}
}
}
/**
*
* @param {FilterBuilder} builder
* @return {Object} The result of <code>builder.build()</code> but with a few translations for ES
*/
getQueryObject (builder) {
const translationMappings = {
is: 'term',
not: 'not',
exists: 'exists',
range: 'range',
in_array: 'terms',
like: 'regexp'
};
function Translate (node) {
node.children.forEach(child => {
if (child instanceof BuilderNode) {
Translate(child);
} else {
let replaced = Object.keys(child)[0];
if (translationMappings[replaced]) {
// 'not' contains a filter name
if (replaced === 'not') {
let secondReplaced = Object.keys(child[replaced])[0];
if (translationMappings[secondReplaced] !== secondReplaced) {
child[replaced][translationMappings[secondReplaced]] = cloneObject(child[replaced][secondReplaced]);
delete child[replaced][secondReplaced];
}
} else if (replaced === 'like') {
child[translationMappings[replaced]] = cloneObject(child[replaced]);
let fieldObj = {};
Object.keys(child[translationMappings[replaced]]).forEach(field => {
fieldObj[field] = `.*${escapeRegExp(child[translationMappings[replaced]][field])}.*`;
});
child[translationMappings[replaced]] = fieldObj;
delete child[replaced];
} else if (translationMappings[replaced] !== replaced) {
child[translationMappings[replaced]] = cloneObject(child[replaced]);
delete child[replaced];
}
}
}
});
}
Translate(builder.root);
return builder.build();
}
async getObjects (items) {
if (!Array.isArray(items) || items.length === 0) {
throw new NexxusError(NexxusError.errors.InvalidFieldValue, 'ElasticSearchDB.getObjects: "items" should be a non-empty array');
}
const docs = items.map(object => {
let index;
switch (object.type) {
case 'application':
case 'admin': {
index = `${constants.CHANNEL_KEY_PREFIX}-${object.type}`;
break;
}
default: {
index = `${constants.CHANNEL_KEY_PREFIX}-${object.application_id}-${object.type}`;
}
}
return {
_id: object.id,
_index: index
};
});
const results = await this.connection.mget({
body: {
docs
}
});
let errors = [];
let objects = [];
let versions = new Map();
results.docs.forEach(result => {
if (result.found) {
objects.push(result._source);
versions.set(result._id, result._version);
} else {
errors.push(new NexxusError(NexxusError.errors.ObjectNotFound, [result._id]));
}
});
return {errors, results: objects, versions};
}
async searchObjects (options) {
let index;
const reqBody = {
query: {
filtered: {
filter: {}
}
}
};
switch (options.modelName) {
case 'application':
case 'admin': {
index = `${constants.CHANNEL_KEY_PREFIX}-${options.modelName}`;
break;
}
default: {
if (Array.isArray(options.modelName)) {
index = options.modelName.map(model => {
return `${constants.CHANNEL_KEY_PREFIX}-${options.applicationId}-${model}`;
}).join(',');
} else {
index = `${constants.CHANNEL_KEY_PREFIX}-${options.applicationId}-${options.modelName}`;
}
}
}
if (options.filters && !options.filters.isEmpty()) {
reqBody.query = this.getQueryObject(options.filters);
} else {
reqBody.query = {match_all: {}};
}
if (options.fields) {
if (!(options.scanFunction instanceof Function)) {
throw new NexxusError(NexxusError.errors.ServerFailure, ['searchObjects was provided with fields but no scanFunction']);
}
let hitsCollected = 0;
let response = await this.connection.search({
index,
body: reqBody,
scroll: '10s',
fields: options.fields,
size: 1024
});
do {
let objects = [];
hitsCollected += response.hits.hits.length;
response.hits.hits.forEach(hit => {
let obj = {};
for (const f in hit.fields) {
obj[f] = hit.fields[f][0];
}
objects.push(obj);
});
if (response.hits.hits.length) {
await options.scanFunction(objects);
}
response = await this.connection.scroll({
scrollId: response._scroll_id,
scroll: '10s'
});
} while (response.hits.total !== hitsCollected);
return null;
}
if (options.sort) {
reqBody.sort = [];
Object.keys(options.sort).forEach(field => {
let sortObjectField = {};
if (!options.sort[field].type) {
sortObjectField[field] = { order: options.sort[field].order, unmapped_type: 'long' };
} else if (options.sort[field].type === 'geo') {
sortObjectField._geo_distance = {};
sortObjectField._geo_distance[field] = { lat: options.sort[field].poi.lat || 0.0, lon: options.sort[field].poi.long || 0.0 };
sortObjectField._geo_distance.order = options.sort[field].order;
}
reqBody.sort.push(sortObjectField);
});
}
const results = await this.connection.search({
index,
body: reqBody,
from: options.offset,
size: options.limit
});
return {results: results.hits.hits.map(object => object._source)};
}
async countObjects (modelName, options) {
let index;
let reqBody = {
query: {
filtered: {
filter: {}
}
}
};
switch (modelName) {
case 'application':
case 'admin': {
index = `${constants.CHANNEL_KEY_PREFIX}-${modelName}`;
break;
}
default: {
index = `${constants.CHANNEL_KEY_PREFIX}-${options.applicationId}-${modelName}`;
}
}
if (options.filters && !options.filters.isEmpty()) {
reqBody.query.filtered.filter = this.getQueryObject(options.filters);
}
if (options.aggregation) {
reqBody.aggs = { aggregation: options.aggregation };
const result = await this.connection.search({
index,
body: reqBody,
search_type: 'count',
queryCache: true
});
return { count: result.hits.total, aggregation: result.aggregations.aggregation.value };
}
const result = await this.connection.count({
index,
body: reqBody
});
return { count: result.count };
}
async createObjects (objects) {
if (!Array.isArray(objects) || objects.length === 0) {
throw new NexxusError('InvalidFieldValue', ['ElasticSearchDB.createObjects: "objects" should be a non-empty array']);
}
let shouldRefresh = false;
let bulk = [];
let errors = [];
await objects.reduce(async (promise, obj) => {
await promise;
let index;
switch (obj.type) {
case 'admin':
case 'application': {
index = `${constants.CHANNEL_KEY_PREFIX}-${obj.type}`;
shouldRefresh = true;
if (obj.schema) {
await Object.keys(obj.schema).reduce(async (p, modelName) => {
await p;
return this.connection.indices.create({
index: `${constants.CHANNEL_KEY_PREFIX}-${obj.id}-${modelName}`
});
}, Promise.resolve());
}
break;
}
default: {
index = `${constants.CHANNEL_KEY_PREFIX}-${obj.applicationId}-${obj.type}`;
}
}
bulk.push({ index: { _id: obj.id, _index: index, _type: '_doc' } });
bulk.push(obj);
return Promise.resolve();
}, Promise.resolve());
if (bulk.length !== objects.length * 2) {
Services.logger.warning(`ElasticSearchDB.createObjects: some objects were missing their "type" and "id" (${(objects.length - bulk.length / 2)} failed)`);
}
if (!bulk.length) {
return null;
}
const res = await this.connection.bulk({
body: bulk,
refresh: shouldRefresh
});
if (res.errors) {
res.items.forEach(item => {
// only items that actually failed carry an `error` field
if (item.index && item.index.error) {
errors.push(new NexxusError('ServerFailure', `Error creating ${item.index._type}: ${item.index.error}`));
}
});
}
return {errors};
}
async updateObjects (patches) {
if (!Array.isArray(patches) || patches.length === 0) {
throw new NexxusError(NexxusError.errors.InvalidFieldValue, 'ElasticSearchDB.updateObjects: "patches" should be a non-empty array');
}
let errors = [];
let shouldRefresh = false;
let finalResults = new Map();
async function getAndUpdate (objectPatches) {
let conflictedObjectPatches = [];
let objectsToGet = new Map();
let modifiedApplicationSchemas = new Map();
objectPatches.forEach(patch => {
if (!(patch instanceof NexxusPatch)) {
throw new TypeError('ElasticSearchDB.updateObjects: array elements must all be instances of NexxusPatch');
}
if (objectsToGet.has(patch.id)) {
objectsToGet.get(patch.id).patches.push(patch);
} else {
objectsToGet.set(patch.id, { id: patch.id, type: patch.model, application_id: patch.applicationId, patches: [patch] });
}
// we need to remember which application had its schema modified
if (patch.field === 'schema') {
modifiedApplicationSchemas.set(patch.id, true);
}
});
let bulk = [];
if (objectPatches.length === 0) {
return null;
}
let { errors: notFoundErrors, results, versions } = await this.getObjects(Array.from(objectsToGet.values()));
errors = errors.concat(notFoundErrors);
if (!results || !results.length) {
return null;
}
// (the remainder of elasticsearch_adapter.js is truncated in this preview)
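The adapter above wraps every 'action' method of the elasticsearch client in a Proxy so that connection failures are intercepted in one place, for both the callback and the promise call styles the library supports. Below is a stripped-down sketch of that pattern; `api` and `onFatal` are illustrative placeholders, not the adapter's real objects.

```javascript
// Minimal sketch of the Proxy-based error interception used by the adapter.
function interceptErrors(api, onFatal) {
  for (const name of Object.keys(api)) {
    if (typeof api[name] !== 'function') continue;
    api[name] = new Proxy(api[name], {
      apply(target, ctx, args) {
        const last = args[args.length - 1];
        if (last instanceof Function) {
          // Callback style: observe the error before forwarding the result.
          args[args.length - 1] = (err, res) => (err ? onFatal(err) : last(null, res));
          return Reflect.apply(target, ctx, args);
        }
        // Promise style: attach the handler to the rejection path instead.
        return Reflect.apply(target, ctx, args).catch(onFatal);
      }
    });
  }
}
```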
index.js

class Sim {
static userVars = {};
static userAlgos = {};
static ctrl = null;
static lane1 = [];
static lane2 = [];
static crossing = 0;
static ui = {};
static creatorInterval = null;
static start() {
// if (Sim.ui.$sim === undefined) Sim.resetUI();
Sim.clearErrors();
Sim.loadUserInputs();
Sim.ui.$fieldset.disabled = true;
Sim.ctrl = new Controller();
Sim.ctrl.run();
Sim.initCreator();
Sim.redraw();
}
static stop() {
function freezeSimUI() {
// To replace all elements so no one can alter the current state
Sim.ui.$sim.outerHTML = Sim.ui.$sim.outerHTML;
// To signal that they no longer reference actual DOM elements and free them maybe(?)
Sim.ui = {};
}
function freeVariables() {
// Kill all user defined semaphores
Object.entries(Sim.userVars)
.map(entry => entry[1])
.filter(val => val instanceof Semaphore)
.forEach(sema => sema.die());
Sim.userVars = {};
}
function freeThreads() {
// TODO:
// - kill "threads" (maybe simply call .destroy() of each existing Traverser)
// - cancel Controller 'ctrl' (AFAIK you can not cancel Promises...)
}
Sim.ui.$fieldset.disabled = false;
clearInterval(Sim.creatorInterval);
freezeSimUI();
//freeThreads();
//freeVariables();
}
static setup() {
const $ = str => document.querySelector(str);
Object.assign(Sim.ui, {
$form: $('form'),
$fieldset: $('form fieldset'),
$semaNames: $('#sema-names'),
$semaVals: $('#sema-vals'),
$intNames: $('#int-names'),
$intVals: $('#int-vals'),
$controller: $('#controller'),
$traverser1: $('#traverser1'),
$traverser2: $('#traverser2'),
$sim: $('#sim'),
$intersection: $('#intersection'),
})
Sim.ui.$form.onsubmit = (ev) => {
ev.preventDefault();
Sim.start();
}
$('#btn-load-attempt').onclick = (ev) => {
ev.preventDefault();
Sim.loadPreset(Sim.presets.myAttempt);
}
$('#btn-load-correct').onclick = (ev) => {
ev.preventDefault();
Sim.loadPreset(Sim.presets.correctAnswer);
}
}
static loadUserInputs() {
const {ui} = Sim;
// userAlgos...
Object.assign(Sim.userAlgos,
{
controller: ui.$controller.value,
traverser1: ui.$traverser1.value,
traverser2: ui.$traverser2.value,
}
)
// userVars...
const SEP = /,\s*/; // separator
const userSemas = zipObject(
ui.$semaNames.value.split(SEP),
ui.$semaVals.value.split(SEP).map(val => new Semaphore(Number(val)))
);
const userInts = zipObject(
ui.$intNames.value.split(SEP),
ui.$intVals.value.split(SEP).map(val => Number(val))
);
Object.assign(Sim.userVars, userSemas, userInts);
}
static initCreator() {
// maybe create a new instance of Traverser every second
Sim.creatorInterval = setInterval(Sim.maybeCreateTraverser, 1 * 1000);
}
/**
* Maybe create a new instance of Traverser and run it
* @returns {Traverser} The newly created traverser or null.
*/
static maybeCreateTraverser() {
// maybe not
if (Math.random() < 0.5) {
return null;
}
if (Math.random() < 0.5) {
if (Sim.lane1.length < Traverser1.MAX) {
const t1 = new Traverser1();
Sim.lane1.push(t1);
t1.run();
return t1;
}
} else {
if (Sim.lane2.length < Traverser2.MAX) {
const t2 = new Traverser2();
Sim.lane2.push(t2);
t2.run();
return t2;
}
}
// no room for a new 'Traverser' in the randomly chosen 'lane'
return null;
}
static redraw() {
// Just update 'data-*' and '--pos' values, and let CSS take care of the rest.
// Traffic light
const {ui, ctrl, userVars} = Sim;
ui.$sim.dataset.light = userVars.light;
ui.$sim.dataset.ctrlQueued = ctrl.orderVec.some(sema => userVars[sema].getPosition(ctrl) > 0);
// Lanes
const {lane1, lane2, redrawTraverser} = Sim;
lane1.sort(Traverser.compareTraversers);
lane2.sort(Traverser.compareTraversers);
lane1.forEach(redrawTraverser);
lane2.forEach(redrawTraverser);
// FIXME: Maybe it's better to use Proxy(userVars) and redraw after its attributes are accessed..
// Redraw before each repaint
requestAnimationFrame(Sim.redraw);
}
/**
* Update Traverser
*
* @param {Traverser} t
* @param {number} i - index
*/
static redrawTraverser(t, i) {
t.$elem.style.setProperty('--pos', i);
t.$elem.title =
`${t.name}\n\n` +
`orderVec: {${t.orderVec.join(', ')}}\n` +
`waitVec: {${t.getWaitVec().join(', ')}}`;
}
static showError(algoSource, message) {
Sim.ui[ '$' + algoSource ].setAttribute('title', message);
Sim.stop();
}
static clearErrors() {
Sim.ui.$form.querySelectorAll('[title]').forEach($x => $x.removeAttribute('title'));
}
/**
* Populate UI inputs with preset data
*
* @param {object} preset
*/
static loadPreset(preset) {
const keys = 'semaNames semaVals intNames intVals controller traverser1 traverser2'.split(' ');
for (const key of keys) {
Sim.ui['$' + key].value = preset[key];
}
}
}
class Algorithm {
/**
* @param {String} algoSource - "controller", "traverser1", or "traverser2"
*/
constructor(algoSource) {
this.algoSource = algoSource;
this.userAlgo = Sim.userAlgos[algoSource];
this.orderVec = Algorithm.parseOrderVector(this.userAlgo);
}
async run() {
const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor;
try {
const asyncFunc = new AsyncFunction(`
with (Sim.userVars) {
${ Algorithm.awaitifyThis(this.userAlgo) }
}
`);
await asyncFunc.call(this);
} catch (userError) {
console.error('userError', userError); // for actual debugging
Sim.showError(this.algoSource, userError.message);
}
}
/**
* Replace function calls with `await`ed method invocations associated with `this` object.
*
* @param {string} code
* @returns {string} Updated code
*/
static awaitifyThis(code) {
// Prefix "p", "v", "sleep", and "traverse" calls with `await` and attach them to `this`
return code.replace(/\b(p|v|sleep|traverse)\(/g, 'await this.$1(');
}
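// Example: awaitifyThis("p(light); sleep(2); v(light);") returns
// "await this.p(light); await this.sleep(2); await this.v(light);",
// so the user's pseudo-code runs as awaited method calls on this Algorithm.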
async p(x) {
await this.sleep(0);
await x.acquire(this);
}
async v(x) {
await this.sleep(0);
await x.release(this);
}
async sleep(secs) {
return new Promise((resolve, _reject) => {
setTimeout(resolve, secs * 1000);
});
}
/**
* A vehicle's place in line is determined by a vector of priorities
* which are based on calls to p(...)
*
* @todo Should dedup before returning?
*
* @param {string} code
* @return {Array<string>}
*/
static parseOrderVector(code) {
const pCalls = code.match(/\bp\(\s*(\w+)\s*\)/g) || [];
const acquiredSemaphores = pCalls.map(x => x.slice(2, -1).trim());
return acquiredSemaphores;
}
}
class Controller extends Algorithm {
constructor() {
super('controller');
}
}
class Traverser extends Algorithm {
static counter = 0;
static freeColors = 'blue coral darkkhaki firebrick yellowgreen gray skyblue teal orange pink purple yellow'.split(' ');
constructor(algoSource) {
super(algoSource);
this.id = this.getUniqueId();
this.color = this.getUniqueColor();
this.type = Math.random() < 0.25 ? 'truck' : 'car'; // 25% chance of being a truck
this.name
// (the remainder of the Traverser constructor is truncated in this preview)

/**
 * A basic Promise-based and queue-based Semaphore
 */
const Semaphore = class SillySemaphore {
constructor(permits) {
this._permits = permits;
this._queue = [];
}
getPosition(acquirer) {
const idx = this._queue.findIndex(entry => entry.acquirer === acquirer);
return idx + 1; // to get rid of '-1'
}
async acquire(acquirer) {
return new Promise( (resolve, reject) => {
this._queue.push({
acquirer,
resolve,
reject
});
this._maybeNotify();
});
}
async release(_acquirer) {
this._permits++;
this._maybeNotify();
}
/**
* Reject all pending promises and nullify the 'queue'
* so that future calls fail...
*/
die() {
let entry;
while (entry = this._queue.pop()) {
entry.reject();
}
this._queue = null;
}
_maybeNotify() {
if (this._permits > 0) {
const entry = this._queue.shift();
if (entry) {
this._permits--;
entry.resolve();
}
}
}
}
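SillySemaphore above hands out permits in FIFO order: acquire queues the caller and resolves its promise once a permit is free, and release returns a permit and wakes the head of the queue. Below is a minimal standalone usage sketch; the names `light` and `crossIntersection` are illustrative, not from the simulator.

```javascript
// Standalone sketch of the queue-based semaphore defined above.
const light = new Semaphore(1); // one permit, i.e. a binary semaphore

async function crossIntersection(name) {
  await light.acquire(name); // like p(): wait in FIFO order for a permit
  try {
    console.log(`${name} is crossing`);
  } finally {
    await light.release(name); // like v(): return the permit, wake the next waiter
  }
}

crossIntersection('car-1');
crossIntersection('car-2'); // resolves only after car-1 releases
```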
NeuralNetworks.py

# Imports restored for runnability; the original header of this file is not
# shown in the preview, and the listing resumes mid-way through an evaluation
# routine that measures train/test error for one network width at a time.
import copy

import numpy as np
from sklearn.preprocessing import normalize

test_predictions = calculate_predictions(test_data, test_y, widths, activations, weights)
test_error = calculate_error(test_y, test_predictions)
errors.append([width, train_error, test_error])
print(width, " COMPLETE")
for error in errors:
print("Width:", error[0], ", Train Error:", error[1], ", Test Error:", error[2])
def compute_hyperparameters(activations, deriv_activations, zeros):
gammas = [1, 0.5]
ds = [1, 0.1]
smallest = [0, 100.0, 100.0]
for gamma in gammas:
for d in ds: smallest = get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros)
print("----------------------")
return smallest
def get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros):
# Computes the error for the given parameters and returns the error if it is the smallest, or the previous smallest.
widths = [5, 5, 5, 1]
weights = run_sgd(gamma, d, widths, activations, deriv_activations, zeros)
predictions = calculate_predictions(train_data, train_y, widths, activations, weights)
error = calculate_error(train_y, predictions)
print("GAMMA:", gamma, " D:", d, " ERROR:", error)
if error < smallest[2]: smallest = [gamma, d, error]
return smallest
def calculate_predictions(data, y, widths, activations, weights):
predictions = copy.deepcopy(y)
for i in range(len(data)):
predictions[i] = np.sign(run_forward_pass(weights, data[i], widths, activations)[-1])
return predictions
def calculate_error(y, predictions):
return 1 - np.count_nonzero(np.multiply(y, predictions) == 1) / len(y)
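# Both y and the predictions live in {-1, +1}, so their elementwise product is
# +1 exactly where a prediction matches its label; the error is 1 - accuracy.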
def run_sgd(initial_gamma, d, widths, activations, deriv_activations, zeros, n=872):
weights = create_weights(widths, zeros)
loss = []
for epoch in range(100):
learning_rate = update_learning_rate(initial_gamma, d, epoch)
[y, x] = shuffle_data(train_y, train_data)
l = 0
for i in range(n):
nodes = run_forward_pass(weights, x[i], widths, activations)
prediction = np.sign(nodes[-1])
weights_grad = run_backpropagation(weights, nodes, y[i], prediction, deriv_activations)
weights = update_weights(weights, learning_rate, weights_grad)
l += compute_loss(prediction, y[i])
loss.append(l)
# print("LOSS:", loss)
return weights
def create_weights(widths, zeros):
weights = []
for level in range(len(widths) - 2):
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, widths[level + 1] - 1).tolist())
else:
temp.append([0] * (widths[level + 1] - 1))
weights.append(temp)
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, 1).tolist())
else:
temp.append([1])
weights.append(temp)
return np.array(weights)
def shuffle_data(y, data):
"""Shuffles the given data by appending y to the data, then shuffling, then returns the separated data and y."""
combined = np.c_[data.reshape(len(data), -1), y.reshape(len(y), -1)]
np.random.shuffle(combined)
shuffled_data = combined[:, :data.size // len(data)].reshape(data.shape)
shuffled_y = combined[:, data.size // len(data):].reshape(y.shape)
return [shuffled_y, shuffled_data]
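# np.c_ glues the label column onto the feature matrix so that a single
# shuffle keeps every row's features and label together; the two parts are
# then sliced back apart and returned in their original shapes.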
def update_learning_rate(initial_gamma, d, epoch):
return initial_gamma / (1.0 + epoch * (initial_gamma / d))
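# Example: with initial_gamma = 0.5 and d = 0.1 this is 0.5 / (1 + 5 * epoch),
# i.e. 0.5 at epoch 0, ~0.083 at epoch 1, ~0.045 at epoch 2: large steps early,
# fine-tuning later.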
def update_weights(weights, learning_rate, weights_grad):
for i in range(len(weights_grad)):
for j in range(len(weights_grad[i])):
for k in range(len(weights_grad[i][j])):
if type(weights[i][j][k]) == np.matrix:
weights[i][j][k][0, 0] -= learning_rate * weights_grad[i][j][k][0, 0]
else:
weights[i][j][k] -= learning_rate * weights_grad[i][j][k]
return weights
def compute_loss(prediction, label):
return np.square(prediction[0] - label[0, 0]) / 2
# Forward Pass
def run_forward_pass(weights, example, widths, activations):
shape = []
for i in range(len(widths)):
shape.append(np.zeros(widths[i]))
nodes = np.array(shape)
nodes[0] = example
for i in range(1, len(nodes)):
nodes[i] = activations[i-1](widths[i], weights[i-1], nodes[i-1])
return nodes
def linear_activation(width, weights, prev_nodes):
curr_nodes = np.zeros(width)
for j in range(len(curr_nodes)):
for i in range(len(prev_nodes)):
curr_nodes[j] += prev_nodes[i] * weights[i][j]
return curr_nodes
def sigmoid_activation(width, weights, prev_nodes):
prev_nodes = copy.deepcopy(prev_nodes)
if prev_nodes.ndim > 1:
prev_nodes = np.asarray(prev_nodes.T)
prev_nodes = prev_nodes[:, 0]
curr_nodes = np.zeros(width)
curr_nodes[0] = 1
for j in range(len(curr_nodes) - 1):
z = 0
for i in range(len(prev_nodes)):
z += prev_nodes[i] * weights[i][j]
curr_nodes[j + 1] = compute_sigmoid(z)
return curr_nodes
def compute_sigmoid(z):
return 1/(1+np.exp(-z))
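# Note: np.exp(-z) can overflow for large negative z. A numerically safer
# variant (an assumption, not part of the original file) branches on the sign:
#     z >= 0: 1 / (1 + np.exp(-z))
#     z < 0:  np.exp(z) / (1 + np.exp(z))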
# Backpropagation
def run_backpropagation(weights, nodes, y, prediction, activations):
loss_deriv = prediction - y
prev_node_derivs = [loss_deriv]
weight_derivs = copy.deepcopy(weights)
is_last_level = True
for level in range(len(weights) - 1, -1, -1):
weight_derivs[level] = compute_weight_derivs(weight_derivs[level], prev_node_derivs, nodes[level+1], nodes[level], activations[level])
prev_node_derivs = compute_node_derivatives(weights[level], nodes[level], prev_node_derivs, is_last_level)
is_last_level = False
return weight_derivs
def compute_weight_derivs(weight_derivs, prev_node_derivs, prev_nodes, next_nodes, activation):
start = 0
if activation == sigmoid_activation_deriv: start = 1
for i in range(len(weight_derivs)):
for j in range(start, len(weight_derivs[i]) + start):
if next_nodes.ndim == 2:
next_nodes = copy.deepcopy(next_nodes)
next_nodes = np.asarray(next_nodes.T)
next_nodes = next_nodes[:, 0]
weight_derivs[i][j-start] = activation(prev_node_derivs[j], next_nodes[i], prev_nodes[j])
return weight_derivs
def linear_activation_deriv(prev_node_deriv, next_node, _):
return prev_node_deriv[0] * next_node
def sigmoid_activation_deriv(prev_node_deriv, next_node, prev_node):
return prev_node_deriv * next_node * prev_node * (1-prev_node)
def compute_node_derivatives(weights, curr_nodes, prev_node_derivs, is_last_level):
curr_node_derivs = np.zeros(curr_nodes.shape)
for i in range(len(curr_nodes)):
product = 0
for j in range(len(weights[i])):
k = j
if not is_last_level: k += 1
product += weights[i][j] * prev_node_derivs[k]
curr_node_derivs[i] = product
return curr_node_derivs
def import_data(path, num_examples):
"""Imports the data at the given path to a csv file with the given amount of examples."""
data = np.empty((num_examples, 5), dtype="float128")
y = np.empty((num_examples, 1), dtype="float128")
with open(path, 'r') as f:
i = 0
for line in f:
example = []
terms = line.strip().split(',')
for j in range(len(terms)):
if j == 4:
y[i] = 2 * float(terms[j]) - 1
else:
example.append(float(terms[j]))
data[i, 1:] = example
data[i, 0] = 1
i += 1
data = normalize(np.asmatrix(data), axis=0)
return [data, np.asmatrix(y)]
def run_example():
widths = np.array([3, 3, 3, 1])
train_x = np.array([1., 1., 1.])
train_y = np.array([1])
weights = np.array([
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1.], [2.], [- | widths = [5, 5, 5, 1]
weights = run_sgd(gamma, d, widths, activations, deriv_activations, zeros)
predictions = calculate_predictions(train_data, train_y, widths, activations, weights)
error = calculate_error(train_y, predictions)
print("GAMMA:", gamma, " D:", d, " ERROR:", error)
if error < smallest[2]: smallest = [gamma, d, error]
return smallest | identifier_body |
NeuralNetworks.py | = calculate_predictions(test_data,test_y, widths, activations, weights)
test_error = calculate_error(test_y, test_predictions)
errors.append([width, train_error, test_error])
print(width, " COMPLETE")
for error in errors:
print("Width:", error[0], ", Train Error:", error[1], ", Test Error:", error[2])
def compute_hyperparameters(activations, deriv_activations, zeros):
gammas = [1, 0.5]
ds = [1, 0.1]
smallest = [0, 100.0, 100.0]
for gamma in gammas:
|
return smallest
def get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros):
# Computes the error for the given parameters and returns the error if it is the smallest, or the previous smallest.
widths = [5, 5, 5, 1]
weights = run_sgd(gamma, d, widths, activations, deriv_activations, zeros)
predictions = calculate_predictions(train_data, train_y, widths, activations, weights)
error = calculate_error(train_y, predictions)
print("GAMMA:", gamma, " D:", d, " ERROR:", error)
if error < smallest[2]: smallest = [gamma, d, error]
return smallest
def calculate_predictions(data, y, widths, activations, weights):
predictions = copy.deepcopy(y)
for i in range(len(data)):
predictions[i] = np.sign(run_forward_pass(weights, data[i], widths, activations)[-1])
return predictions
def calculate_error(y, predictions):
return 1 - np.count_nonzero(np.multiply(y, predictions) == 1) / len(y)
def run_sgd(initial_gamma, d, widths, activations, deriv_activations, zeros, n=872):
weights = create_weights(widths, zeros)
loss = []
for epoch in range(100):
learning_rate = update_learning_rate(initial_gamma, d, epoch)
[y, x] = shuffle_data(train_y, train_data)
l = 0
for i in range(n):
nodes = run_forward_pass(weights, x[i], widths, activations)
prediction = np.sign(nodes[-1])
weights_grad = run_backpropagation(weights, nodes, y[i], prediction, deriv_activations)
weights = update_weights(weights, learning_rate, weights_grad)
l += compute_loss(prediction, y[i])
loss.append(l)
# print("LOSS:", loss)
return weights
def create_weights(widths, zeros):
weights = []
for level in range(len(widths) - 2):
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, widths[level + 1] - 1).tolist())
else:
temp.append([0] * (widths[level + 1] - 1))
weights.append(temp)
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, 1).tolist())
else:
temp.append([1])
weights.append(temp)
return np.array(weights)
def shuffle_data(y, data):
"""Shuffles the given data by appending y to the data, then shuffling, then returns the separated data and y."""
combined = np.c_[data.reshape(len(data), -1), y.reshape(len(y), -1)]
np.random.shuffle(combined)
shuffled_data = combined[:, :data.size // len(data)].reshape(data.shape)
shuffled_y = combined[:, data.size // len(data):].reshape(y.shape)
return [shuffled_y, shuffled_data]
def update_learning_rate(initial_gamma, d, epoch):
return initial_gamma / (1.0 + epoch * (initial_gamma / d))
def update_weights(weights, learning_rate, weights_grad):
for i in range(len(weights_grad)):
for j in range(len(weights_grad[i])):
for k in range(len(weights_grad[i][j])):
if type(weights[i][j][k]) == np.matrix:
weights[i][j][k][0, 0] -= learning_rate * weights_grad[i][j][k][0, 0]
else:
weights[i][j][k] -= learning_rate * weights_grad[i][j][k]
return weights
def compute_loss(prediction, label):
return np.square(prediction[0] - label[0, 0]) / 2
# Forward Pass
def run_forward_pass(weights, example, widths, activations):
shape = []
for i in range(len(widths)):
shape.append(np.zeros(widths[i]))
nodes = np.array(shape)
nodes[0] = example
for i in range(1, len(nodes)):
nodes[i] = activations[i-1](widths[i], weights[i-1], nodes[i-1])
return nodes
def linear_activation(width, weights, prev_nodes):
curr_nodes = np.zeros(width)
for j in range(len(curr_nodes)):
for i in range(len(prev_nodes)):
curr_nodes[j] += prev_nodes[i] * weights[i][j]
return curr_nodes
def sigmoid_activation(width, weights, prev_nodes):
prev_nodes = copy.deepcopy(prev_nodes)
if prev_nodes.ndim > 1:
prev_nodes = np.asarray(prev_nodes.T)
prev_nodes = prev_nodes[:, 0]
curr_nodes = np.zeros(width)
curr_nodes[0] = 1
for j in range(len(curr_nodes) - 1):
z = 0
for i in range(len(prev_nodes)):
z += prev_nodes[i] * weights[i][j]
curr_nodes[j + 1] = compute_sigmoid(z)
return curr_nodes
def compute_sigmoid(z):
return 1/(1+np.exp(-z))
# Backpropagation
def run_backpropagation(weights, nodes, y, prediction, activations):
loss_deriv = prediction - y
prev_node_derivs = [loss_deriv]
weight_derivs = copy.deepcopy(weights)
is_last_level = True
for level in range(len(weights) - 1, -1, -1):
weight_derivs[level] = compute_weight_derivs(weight_derivs[level], prev_node_derivs, nodes[level+1], nodes[level], activations[level])
prev_node_derivs = compute_node_derivatives(weights[level], nodes[level], prev_node_derivs, is_last_level)
is_last_level = False
return weight_derivs
def compute_weight_derivs(weight_derivs, prev_node_derivs, prev_nodes, next_nodes, activation):
start = 0
if activation == sigmoid_activation_deriv: start = 1
for i in range(len(weight_derivs)):
for j in range(start, len(weight_derivs[i]) + start):
if next_nodes.ndim == 2:
next_nodes = copy.deepcopy(next_nodes)
next_nodes = np.asarray(next_nodes.T)
next_nodes = next_nodes[:, 0]
weight_derivs[i][j-start] = activation(prev_node_derivs[j], next_nodes[i], prev_nodes[j])
return weight_derivs
def linear_activation_deriv(prev_node_deriv, next_node, _):
return prev_node_deriv[0] * next_node
def sigmoid_activation_deriv(prev_node_deriv, next_node, prev_node):
return prev_node_deriv * next_node * prev_node * (1-prev_node)
def compute_node_derivatives(weights, curr_nodes, prev_node_derivs, is_last_level):
curr_node_derivs = np.zeros(curr_nodes.shape)
for i in range(len(curr_nodes)):
product = 0
for j in range(len(weights[i])):
k = j
if not is_last_level: k += 1
product += weights[i][j] * prev_node_derivs[k]
curr_node_derivs[i] = product
return curr_node_derivs
def import_data(path, num_examples):
"""Imports the data at the given path to a csv file with the given amount of examples."""
data = np.empty((num_examples, 5), dtype="float128")
y = np.empty((num_examples, 1), dtype="float128")
with open(path, 'r') as f:
i = 0
for line in f:
example = []
terms = line.strip().split(',')
for j in range(len(terms)):
if j == 4:
y[i] = 2 * float(terms[j]) - 1
else:
example.append(float(terms[j]))
data[i, 1:] = example
data[i, 0] = 1
i += 1
data = normalize(np.asmatrix(data), axis=0)
return [data, np.asmatrix(y)]
def run_example():
widths = np.array([3, 3, 3, 1])
train_x = np.array([1., 1., 1.])
train_y = np.array([1])
weights = np.array([
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1.], [2.], [- | for d in ds: smallest = get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros)
print("----------------------") | conditional_block |
NeuralNetworks.py | = calculate_predictions(test_data,test_y, widths, activations, weights)
test_error = calculate_error(test_y, test_predictions)
errors.append([width, train_error, test_error])
print(width, " COMPLETE")
for error in errors:
print("Width:", error[0], ", Train Error:", error[1], ", Test Error:", error[2])
def compute_hyperparameters(activations, deriv_activations, zeros):
gammas = [1, 0.5]
ds = [1, 0.1]
smallest = [0, 100.0, 100.0]
for gamma in gammas:
for d in ds: smallest = get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros)
print("----------------------")
return smallest
def get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros):
# Computes the error for the given parameters and returns the error if it is the smallest, or the previous smallest.
widths = [5, 5, 5, 1]
weights = run_sgd(gamma, d, widths, activations, deriv_activations, zeros)
predictions = calculate_predictions(train_data, train_y, widths, activations, weights)
error = calculate_error(train_y, predictions)
print("GAMMA:", gamma, " D:", d, " ERROR:", error)
if error < smallest[2]: smallest = [gamma, d, error]
return smallest
def calculate_predictions(data, y, widths, activations, weights):
predictions = copy.deepcopy(y)
for i in range(len(data)):
predictions[i] = np.sign(run_forward_pass(weights, data[i], widths, activations)[-1])
return predictions
def calculate_error(y, predictions):
return 1 - np.count_nonzero(np.multiply(y, predictions) == 1) / len(y)
def run_sgd(initial_gamma, d, widths, activations, deriv_activations, zeros, n=872):
weights = create_weights(widths, zeros)
loss = []
for epoch in range(100):
learning_rate = update_learning_rate(initial_gamma, d, epoch)
[y, x] = shuffle_data(train_y, train_data)
l = 0
for i in range(n):
nodes = run_forward_pass(weights, x[i], widths, activations)
prediction = np.sign(nodes[-1])
weights_grad = run_backpropagation(weights, nodes, y[i], prediction, deriv_activations)
weights = update_weights(weights, learning_rate, weights_grad)
l += compute_loss(prediction, y[i])
loss.append(l)
# print("LOSS:", loss)
return weights
def create_weights(widths, zeros):
weights = []
for level in range(len(widths) - 2):
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, widths[level + 1] - 1).tolist())
else:
temp.append([0] * (widths[level + 1] - 1))
weights.append(temp)
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, 1).tolist())
else:
temp.append([1])
weights.append(temp)
return np.array(weights)
def shuffle_data(y, data):
"""Shuffles the given data by appending y to the data, then shuffling, then returns the separated data and y."""
combined = np.c_[data.reshape(len(data), -1), y.reshape(len(y), -1)]
np.random.shuffle(combined)
shuffled_data = combined[:, :data.size // len(data)].reshape(data.shape)
shuffled_y = combined[:, data.size // len(data):].reshape(y.shape)
return [shuffled_y, shuffled_data]
def update_learning_rate(initial_gamma, d, epoch):
return initial_gamma / (1.0 + epoch * (initial_gamma / d))
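# The schedule above is gamma_t = gamma_0 / (1 + t * gamma_0 / d). For example,
# gamma_0 = 1 and d = 0.1 give rates 1.0, ~0.0909 and ~0.0476 at epochs 0, 1, 2.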
def update_weights(weights, learning_rate, weights_grad):
for i in range(len(weights_grad)):
for j in range(len(weights_grad[i])):
for k in range(len(weights_grad[i][j])):
if type(weights[i][j][k]) == np.matrix:
weights[i][j][k][0, 0] -= learning_rate * weights_grad[i][j][k][0, 0]
else:
weights[i][j][k] -= learning_rate * weights_grad[i][j][k]
return weights
def compute_loss(prediction, label):
return np.square(prediction[0] - label[0, 0]) / 2
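# Squared-error loss on a single example: L = (prediction - label)^2 / 2.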
# Forward Pass
def run_forward_pass(weights, example, widths, activations):
shape = []
for i in range(len(widths)):
shape.append(np.zeros(widths[i]))
nodes = np.array(shape)
nodes[0] = example
for i in range(1, len(nodes)):
nodes[i] = activations[i-1](widths[i], weights[i-1], nodes[i-1])
return nodes
def linear_activation(width, weights, prev_nodes):
curr_nodes = np.zeros(width)
for j in range(len(curr_nodes)):
for i in range(len(prev_nodes)):
curr_nodes[j] += prev_nodes[i] * weights[i][j]
return curr_nodes
def sigmoid_activation(width, weights, prev_nodes):
prev_nodes = copy.deepcopy(prev_nodes)
if prev_nodes.ndim > 1:
prev_nodes = np.asarray(prev_nodes.T)
prev_nodes = prev_nodes[:, 0]
curr_nodes = np.zeros(width)
curr_nodes[0] = 1
for j in range(len(curr_nodes) - 1):
z = 0
for i in range(len(prev_nodes)):
z += prev_nodes[i] * weights[i][j]
curr_nodes[j + 1] = compute_sigmoid(z)
return curr_nodes
def compute_sigmoid(z):
return 1/(1+np.exp(-z))
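# Sanity check (illustrative, not in the original): the logistic function is 0.5
# at z = 0 and saturates toward 0 or 1 for large |z|.
# assert abs(compute_sigmoid(0) - 0.5) < 1e-12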
# Backpropagation
def run_backpropagation(weights, nodes, y, prediction, activations):
loss_deriv = prediction - y
prev_node_derivs = [loss_deriv]
weight_derivs = copy.deepcopy(weights)
is_last_level = True
for level in range(len(weights) - 1, -1, -1):
weight_derivs[level] = compute_weight_derivs(weight_derivs[level], prev_node_derivs, nodes[level+1], nodes[level], activations[level])
prev_node_derivs = compute_node_derivatives(weights[level], nodes[level], prev_node_derivs, is_last_level)
is_last_level = False
return weight_derivs
def compute_weight_derivs(weight_derivs, prev_node_derivs, prev_nodes, next_nodes, activation):
start = 0
if activation == sigmoid_activation_deriv: start = 1
for i in range(len(weight_derivs)):
for j in range(start, len(weight_derivs[i]) + start):
if next_nodes.ndim == 2:
next_nodes = copy.deepcopy(next_nodes)
next_nodes = np.asarray(next_nodes.T)
next_nodes = next_nodes[:, 0]
weight_derivs[i][j-start] = activation(prev_node_derivs[j], next_nodes[i], prev_nodes[j])
return weight_derivs
def linear_activation_deriv(prev_node_deriv, next_node, _):
return prev_node_deriv[0] * next_node
def sigmoid_activation_deriv(prev_node_deriv, next_node, prev_node):
return prev_node_deriv * next_node * prev_node * (1-prev_node)
def compute_node_derivatives(weights, curr_nodes, prev_node_derivs, is_last_level):
curr_node_derivs = np.zeros(curr_nodes.shape)
for i in range(len(curr_nodes)):
product = 0
for j in range(len(weights[i])):
k = j
if not is_last_level: k += 1
product += weights[i][j] * prev_node_derivs[k]
curr_node_derivs[i] = product
return curr_node_derivs
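# Chain rule over the following layer: dL/dz_i = sum_j w_ij * dL/dz'_j. The k
# offset skips the bias node's derivative on every layer except the output.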
def import_data(path, num_examples):
"""Imports the data at the given path to a csv file with the given amount of examples."""
data = np.empty((num_examples, 5), dtype="float128")
y = np.empty((num_examples, 1), dtype="float128")
with open(path, 'r') as f:
i = 0
for line in f:
example = []
terms = line.strip().split(',')
for j in range(len(terms)):
if j == 4:
y[i] = 2 * float(terms[j]) - 1
else:
example.append(float(terms[j]))
data[i, 1:] = example
data[i, 0] = 1
i += 1
data = normalize(np.asmatrix(data), axis=0)
return [data, np.asmatrix(y)]
def run_example():
widths = np.array([3, 3, 3, 1])
train_x = np.array([1., 1., 1.])
train_y = np.array([1])
weights = np.array([
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1.], [2.], [-
NeuralNetworks.py | test_predictions = calculate_predictions(test_data, test_y, widths, activations, weights)
test_error = calculate_error(test_y, test_predictions)
errors.append([width, train_error, test_error])
print(width, " COMPLETE")
for error in errors:
print("Width:", error[0], ", Train Error:", error[1], ", Test Error:", error[2])
def compute_hyperparameters(activations, deriv_activations, zeros):
gammas = [1, 0.5]
ds = [1, 0.1]
smallest = [0, 100.0, 100.0]
for gamma in gammas:
for d in ds: smallest = get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros)
print("----------------------")
return smallest
def get_smallest_error(smallest, gamma, d, activations, deriv_activations, zeros):
# Computes the error for the given parameters and returns the error if it is the smallest, or the previous smallest.
widths = [5, 5, 5, 1]
weights = run_sgd(gamma, d, widths, activations, deriv_activations, zeros)
predictions = calculate_predictions(train_data, train_y, widths, activations, weights)
error = calculate_error(train_y, predictions)
print("GAMMA:", gamma, " D:", d, " ERROR:", error)
if error < smallest[2]: smallest = [gamma, d, error]
return smallest
def calculate_predictions(data, y, widths, activations, weights):
predictions = copy.deepcopy(y)
for i in range(len(data)):
predictions[i] = np.sign(run_forward_pass(weights, data[i], widths, activations)[-1])
return predictions
def calculate_error(y, predictions):
return 1 - np.count_nonzero(np.multiply(y, predictions) == 1) / len(y)
def run_sgd(initial_gamma, d, widths, activations, deriv_activations, zeros, n=872):
weights = create_weights(widths, zeros)
loss = []
for epoch in range(100):
learning_rate = update_learning_rate(initial_gamma, d, epoch)
[y, x] = shuffle_data(train_y, train_data)
l = 0
for i in range(n):
nodes = run_forward_pass(weights, x[i], widths, activations)
prediction = np.sign(nodes[-1])
weights_grad = run_backpropagation(weights, nodes, y[i], prediction, deriv_activations)
weights = update_weights(weights, learning_rate, weights_grad)
l += compute_loss(prediction, y[i])
loss.append(l)
# print("LOSS:", loss)
return weights
def create_weights(widths, zeros):
weights = []
for level in range(len(widths) - 2):
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, widths[level + 1] - 1).tolist())
else:
temp.append([0] * (widths[level + 1] - 1))
weights.append(temp)
temp = []
for j in range(widths[level]):
if not zeros:
temp.append(np.random.normal(0, 0.1, 1).tolist())
else:
temp.append([1])
weights.append(temp)
return np.array(weights)
def shuffle_data(y, data):
"""Shuffles the given data by appending y to the data, then shuffling, then returns the separated data and y."""
combined = np.c_[data.reshape(len(data), -1), y.reshape(len(y), -1)]
np.random.shuffle(combined)
shuffled_data = combined[:, :data.size // len(data)].reshape(data.shape)
shuffled_y = combined[:, data.size // len(data):].reshape(y.shape)
return [shuffled_y, shuffled_data]
def update_learning_rate(initial_gamma, d, epoch):
return initial_gamma / (1.0 + epoch * (initial_gamma / d))
def update_weights(weights, learning_rate, weights_grad):
for i in range(len(weights_grad)):
for j in range(len(weights_grad[i])):
for k in range(len(weights_grad[i][j])):
if type(weights[i][j][k]) == np.matrix:
weights[i][j][k][0, 0] -= learning_rate * weights_grad[i][j][k][0, 0]
else:
weights[i][j][k] -= learning_rate * weights_grad[i][j][k]
return weights
def compute_loss(prediction, label):
return np.square(prediction[0] - label[0, 0]) / 2
# Forward Pass
def run_forward_pass(weights, example, widths, activations):
shape = []
for i in range(len(widths)):
shape.append(np.zeros(widths[i]))
nodes = np.array(shape)
nodes[0] = example
for i in range(1, len(nodes)):
nodes[i] = activations[i-1](widths[i], weights[i-1], nodes[i-1])
return nodes
def linear_activation(width, weights, prev_nodes):
curr_nodes = np.zeros(width)
for j in range(len(curr_nodes)):
for i in range(len(prev_nodes)):
curr_nodes[j] += prev_nodes[i] * weights[i][j]
return curr_nodes
def sigmoid_activation(width, weights, prev_nodes):
prev_nodes = copy.deepcopy(prev_nodes)
if prev_nodes.ndim > 1:
prev_nodes = np.asarray(prev_nodes.T)
prev_nodes = prev_nodes[:, 0]
curr_nodes = np.zeros(width)
curr_nodes[0] = 1
for j in range(len(curr_nodes) - 1):
z = 0
for i in range(len(prev_nodes)):
z += prev_nodes[i] * weights[i][j]
curr_nodes[j + 1] = compute_sigmoid(z)
return curr_nodes
def compute_sigmoid(z):
return 1/(1+np.exp(-z))
# Backpropagation
def run_backpropagation(weights, nodes, y, prediction, activations):
loss_deriv = prediction - y
prev_node_derivs = [loss_deriv]
weight_derivs = copy.deepcopy(weights)
is_last_level = True
for level in range(len(weights) - 1, -1, -1):
weight_derivs[level] = compute_weight_derivs(weight_derivs[level], prev_node_derivs, nodes[level+1], nodes[level], activations[level])
prev_node_derivs = compute_node_derivatives(weights[level], nodes[level], prev_node_derivs, is_last_level)
is_last_level = False
return weight_derivs
def compute_weight_derivs(weight_derivs, prev_node_derivs, prev_nodes, next_nodes, activation):
start = 0
if activation == sigmoid_activation_deriv: start = 1
for i in range(len(weight_derivs)):
for j in range(start, len(weight_derivs[i]) + start):
if next_nodes.ndim == 2:
next_nodes = copy.deepcopy(next_nodes)
next_nodes = np.asarray(next_nodes.T)
next_nodes = next_nodes[:, 0]
weight_derivs[i][j-start] = activation(prev_node_derivs[j], next_nodes[i], prev_nodes[j])
return weight_derivs
def linear_activation_deriv(prev_node_deriv, next_node, _):
return prev_node_deriv[0] * next_node
def sigmoid_activation_deriv(prev_node_deriv, next_node, prev_node):
return prev_node_deriv * next_node * prev_node * (1-prev_node)
def compute_node_derivatives(weights, curr_nodes, prev_node_derivs, is_last_level):
curr_node_derivs = np.zeros(curr_nodes.shape)
for i in range(len(curr_nodes)):
product = 0
for j in range(len(weights[i])):
k = j
if not is_last_level: k += 1
product += weights[i][j] * prev_node_derivs[k]
curr_node_derivs[i] = product
return curr_node_derivs
def import_data(path, num_examples):
"""Imports the data at the given path to a csv file with the given amount of examples."""
data = np.empty((num_examples, 5), dtype="float128")
y = np.empty((num_examples, 1), dtype="float128")
with open(path, 'r') as f:
i = 0
for line in f:
example = []
terms = line.strip().split(',')
for j in range(len(terms)):
if j == 4:
y[i] = 2 * float(terms[j]) - 1
else:
example.append(float(terms[j]))
data[i, 1:] = example
data[i, 0] = 1
i += 1
data = normalize(np.asmatrix(data), axis=0)
return [data, np.asmatrix(y)]
def run_example():
widths = np.array([3, 3, 3, 1])
train_x = np.array([1., 1., 1.])
train_y = np.array([1])
weights = np.array([
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1., 1.], [-2., 2.], [-3., 3.]],
[[-1.], [2.], [-
updateTotalCompany.js | 03_009,RE9003_010 from [dbo].[tRE9003] where flag<> 1';
let res = await Mssql.connect(config.mssql_rate).query(sql);
let setRateConvertCost = Date.now() - now;
let rows = res.recordset;
let fetched = rows.length; // actual number of records returned by this SQL Server query
if (fetched > 0) {
for (let i = 0; i < rows.length; i++) {
let MY = rows[i].RE9003_001; // US dollar
let OY = rows[i].RE9003_002; // euro
let RY = rows[i].RE9003_003; // Japanese yen
let GY = rows[i].RE9003_004; // Hong Kong dollar
let YB = rows[i].RE9003_005; // British pound
let JNDY = rows[i].RE9003_006; // Canadian dollar
let ODLYY = rows[i].RE9003_007; // Australian dollar
let XXLY = rows[i].RE9003_008; // New Zealand dollar
let XJPY = rows[i].RE9003_009; // Singapore dollar
let RSFL = rows[i].RE9003_010; // Swiss franc
let obj = {
MY: `${MY}`,
OY: `${OY}`,
RY: `${RY}`,
GY: `${GY}`,
YB: `${YB}`,
JNDY: `${JNDY}`,
ODLYY: `${ODLYY}`,
XXLY: `${XXLY}`,
XJPY: `${XJPY}`,
RSFL: `${RSFL}`
};
myCache.set("currencyRate", obj, function (err, success) {
if (!err && success) {
console.log(success);
logger.info('myCache set currencyRate status: ' + success);
console.log('setRateConvertCost: ' + setRateConvertCost + 'ms');
logger.info('setRateConvertCost: ' + setRateConvertCost + 'ms');
return resolve(success);
}
});
}
}
} catch (err) {
console.error(err);
logger.error(err);
return reject(err);
}
});
}
// read the cached exchange rates
function getRateConvert() {
let res = {};
try {
myCache.get("currencyRate", function (err, value) {
if (!err) {
| (value == undefined) {
console.log('can not get the currencyRate value');
logger.info('can not get the currencyRate value');
return ({});
} else {
res = value;
console.log('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
logger.info('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
}
}
});
return res;
} catch (err) {
console.log(err);
logger.error(err);
throw err; // getRateConvert is synchronous, so rethrow instead of calling the undefined reject()
}
}
// determine the currency type
function judgeCurrencyFlag(currencyFlag) {
let rateFlag = 'RMB';
if (currencyFlag == 840) rateFlag = 'MY';
else if (currencyFlag == 954) rateFlag = 'OY';
else if (currencyFlag == 392) rateFlag = 'RY';
else if (currencyFlag == 344) rateFlag = 'GY';
else if (currencyFlag == 826) rateFlag = 'YB';
else if (currencyFlag == 124) rateFlag = 'JNDY';
else if (currencyFlag == 36) rateFlag = 'ODLYY';
else if (currencyFlag == 554) rateFlag = 'XXLY';
else if (currencyFlag == 702) rateFlag = 'XJPY';
else if (currencyFlag == 756) rateFlag = 'RSFL';
else if (currencyFlag == 156 || currencyFlag == 0) rateFlag = 'RMB';
return rateFlag;
}
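// Example (illustrative, not in the original): ISO 4217 numeric code 840 (USD)
// maps to the 'MY' rate key, while 156 (CNY) falls through to 'RMB':
// judgeCurrencyFlag(840) === 'MY'; judgeCurrencyFlag(156) === 'RMB';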
let updateTotalCompany = {
startQueryCompany: async function (flag) {
if (flag) {
try {
// populate the exchange-rate cache
let covertFlag = await setRateConvert();
let rateValueMap = {};
if (covertFlag == true) {
rateValueMap = getRateConvert();
}
let id = config.updateInfo.companyId;
let i = 1;
let ctx = await transactions.getContext(id);
let fetched = 0;
if (!ctx.last)
ctx.last = 0; // reset to 0 for a full update
let resultCount = 0;
let startTime = Date.now();
let updateInfo = {'logInfo': '', 'updateStatus': 0};
let CSVFilePath = '../neo4jDB_update/totalData/companies.csv'; //windows
// the first argument to writeLineStream is a ReadStream instance, or a file name
let w = writeLineStream(fs.createWriteStream(CSVFilePath), {
// newline character, defaults to \n
newline: '\n',
// encoder: a function or a string naming a built-in encoder (json, base64); defaults to null
encoding: function (data) {
return data;
},
// number of lines to buffer, defaults to 0 (no buffering); buffered content is flushed to the stream once it exceeds this count, which can speed up file writes
cacheLines: 0
});
// let line1 = 'ITCode2:ID,ITName:string';
let line1 = 'timestamp:string,isPerson:string,ITCode2:ID,RMBFund:float,regFund:float,regFundUnit:string,isExtra:string,surStatus:string,originTable:string,isBranches:string';
w.write(line1);
let originTable = 'tCR0001_V2.0'; // data source table
do {
let rows = [];
let now = Date.now();
let sql = `
select top 10000 cast(tmstamp as bigint) as _ts, ITCode2,CR0001_005,CR0001_006,CR0001_040,CR0001_041 from [tCR0001_V2.0] WITH(READPAST)
where flag<> 1 and tmstamp > cast( cast(${ctx.last} as bigint) as binary(8)) order by tmstamp;
`;
let res = await Mssql.connect(config.mssql).query(sql);
let queryCost = Date.now() - now;
rows = res.recordset;
fetched = rows.length; // actual number of records returned by this SQL Server query
writeStart = Date.now();
if (fetched > 0) {
resultCount += fetched;
let lines = [];
let codes = [];
for (let i = 0; i < rows.length; i++) {
let rate = null; // exchange-rate key
let rateValue = 1;
let ITCode = rows[i].ITCode2;
let timestamp = rows[i]._ts;
if (ITCode) {
codes.push(ITCode);
}
if (!ITCode) { // if ITCode is null, generate an ID instead and set isExtra to 1 on the node
ITCode = rows[i]._ts + transactions.createRndNum(6); // timestamp plus a 6-digit random number as the ITCode
isExtra = 1; // 1 means there is no institution code
}
else {
isExtra = 0;
}
let fund = rows[i].CR0001_005; // registered capital, before conversion
let currencyUnit = rows[i].CR0001_006; // currency type
let currency
updateTotalCompany.js | 03_009,RE9003_010 from [dbo].[tRE9003] where flag<> 1';
let res = await Mssql.connect(config.mssql_rate).query(sql);
let setRateConvertCost = Date.now() - now;
let rows = res.recordset;
let fetched = rows.length; //每次查询SQL Server的实际记录数
if (fetched > 0) {
for (let i = 0; i < rows.length; i++) {
let MY = rows[i].RE9003_001; // US dollar
let OY = rows[i].RE9003_002; // euro
let RY = rows[i].RE9003_003; // Japanese yen
let GY = rows[i].RE9003_004; // Hong Kong dollar
let YB = rows[i].RE9003_005; // British pound
let JNDY = rows[i].RE9003_006; // Canadian dollar
let ODLYY = rows[i].RE9003_007; // Australian dollar
let XXLY = rows[i].RE9003_008; // New Zealand dollar
let XJPY = rows[i].RE9003_009; // Singapore dollar
let RSFL = rows[i].RE9003_010; // Swiss franc
let obj = {
MY: `${MY}`,
OY: `${OY}`,
RY: `${RY}`,
GY: `${GY}`,
YB: `${YB}`,
JNDY: `${JNDY}`,
ODLYY: `${ODLYY}`,
XXLY: `${XXLY}`,
XJPY: `${XJPY}`,
RSFL: `${RSFL}`
};
myCache.set("currencyRate", obj, function (err, success) {
if (!err && success) {
console.log(success);
logger.info('myCache set currencyRate status: ' + success);
console.log('setRateConvertCost: ' + setRateConvertCost + 'ms');
logger.info('setRateConvertCost: ' + setRateConvertCost + 'ms');
return resolve(success);
}
});
}
}
} catch (err) {
console.error(err);
logger.error(err);
return reject(err);
}
});
}
// read the cached exchange rates
function getRateConvert() {
let res = {};
try {
myCache.get("currencyRate", function (err, value) {
if (!err) {
if (value == undefined) {
console.log('can not get the currencyRate value');
logger.info('can not get the currencyRate value');
return ({});
} else {
res = value;
console.log('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
logger.info('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
}
}
});
return res;
} catch (err) {
console.log(err);
logger.error(err);
throw err; // getRateConvert is synchronous, so rethrow instead of calling the undefined reject()
}
}
// determine the currency type
function judgeCurrencyFlag(currencyFlag) {
let rateFlag = 'RMB';
if (currencyFlag == 840) rateFlag = 'MY';
else if (currencyFlag == 954) rateFlag = 'OY';
else if (currencyFlag == 392) rateFlag = 'RY';
else if (currencyFlag == 344) rateFlag = 'GY';
else if (currencyFlag == 826) rateFlag = 'YB';
else if (currencyFlag == 124) rateFlag = 'JNDY';
else if (currencyFlag == 36) rateFlag = 'ODLYY';
else if (currencyFlag == 554) rateFlag = 'XXLY';
else if (currencyFlag == 702) rateFlag = 'XJPY';
else if (currencyFlag == 756) rateFlag = 'RSFL';
else if (currencyFlag == 156 || currencyFlag == 0) rateFlag = 'RMB';
return rateFlag;
}
let updateTotalCompany = {
startQueryCompany: async function (flag) {
if (flag) {
try {
//set汇率转换
let covertFlag = await setRateConvert();
let rateValueMap = {};
if (covertFlag == true) {
rateValueMap = getRateConvert();
}
let id = config.updateInfo.companyId;
let i = 1;
let ctx = await transactions.getContext(id);
let fetched = 0;
if (!ctx.last)
ctx.last = 0; //全量更新置0
let resultCount = 0;
let startTime = Date.now();
let updateInfo = {'logInfo': '', 'updateStatus': 0};
let CSVFilePath = '../neo4jDB_update/totalData/companies.csv'; //windows
// writeLineStream第一个参数为ReadStream实例,也可以为文件名
let w = writeLineStream(fs.createWriteStream(CSVFilePath), {
// 换行符,默认\n
newline: '\n',
// 编码器,可以为函数或字符串(内置编码器:json,base64),默认null
encoding: function (data) {
return data;
},
// 缓存的行数,默认为0(表示不缓存),此选项主要用于优化写文件性能,当数量缓存的内容超过该数量时再一次性写入到流中,可以提高写速度
cacheLines: 0
});
// let line1 = 'ITCode2:ID,ITName:string';
let line1 = 'timestamp:string,isPerson:string,ITCode2:ID,RMBFund:float,regFund:float,regFundUnit:string,isExtra:string,surStatus:string,originTable:string,isBranches:string';
w.write(line1);
let originTable = 'tCR0001_V2.0'; //数据来源
do {
let rows = [];
let now = Date.now();
let sql = `
select top 10000 cast(tmstamp as bigint) as _ts, ITCode2,CR0001_005,CR0001_006,CR0001_040,CR0001_041 from [tCR0001_V2.0] WITH(READPAST)
where flag<> 1 and tmstamp > cast( cast(${ctx.last} as bigint) as binary(8)) order by tmstamp;
`;
let res = await Mssql.connect(config.mssql).query(sql);
let queryCost = Date.now() - now;
rows = res.recordset;
fetched = rows.length; //每次查询SQL Server的实际记录数
writeStart = Date.now();
if (fetched > 0) {
resultCount += fetched;
let lines = [];
let codes = [];
for (let i = 0; i < rows.length; i++) {
let rate = null; //汇率标识
let rateValue = 1;
let ITCode = rows[i].ITCode2;
let timestamp = rows[i]._ts;
if (ITCode) {
codes.push(ITCode);
}
if (!ITCode) { //如果ITCode为null,则传入UUID,并在node上的isExtra置1;
ITCode = rows[i]._ts + transactions.createRndNum(6); //产生6位随机数 + timestamp作为ITCode
isExtra = 1; //1代表没有机构代码
}
else {
isExtra = 0;
}
let fund = rows[i].CR0001_005; // registered capital, before conversion
let currencyUnit = rows[i].CR0001_006; // currency type
let
updateTotalCompany.js | 03_009,RE9003_010 from [dbo].[tRE9003] where flag<> 1';
let res = await Mssql.connect(config.mssql_rate).query(sql);
let setRateConvertCost = Date.now() - now;
let rows = res.recordset;
let fetched = rows.length; //每次查询SQL Server的实际记录数
if (fetched > 0) {
for (let i = 0; i < rows.length; i++) {
let MY = rows[i].RE9003_001; // US dollar
let OY = rows[i].RE9003_002; // euro
let RY = rows[i].RE9003_003; // Japanese yen
let GY = rows[i].RE9003_004; // Hong Kong dollar
let YB = rows[i].RE9003_005; // British pound
let JNDY = rows[i].RE9003_006; // Canadian dollar
let ODLYY = rows[i].RE9003_007; // Australian dollar
let XXLY = rows[i].RE9003_008; // New Zealand dollar
let XJPY = rows[i].RE9003_009; // Singapore dollar
let RSFL = rows[i].RE9003_010; // Swiss franc
let obj = {
MY: `${MY}`,
OY: `${OY}`,
RY: `${RY}`,
GY: `${GY}`,
YB: `${YB}`,
JNDY: `${JNDY}`,
ODLYY: `${ODLYY}`,
XXLY: `${XXLY}`,
XJPY: `${XJPY}`,
RSFL: `${RSFL}`
};
myCache.set("currencyRate", obj, function (err, success) {
if (!err && success) {
console.log(success);
logger.info('myCache set currencyRate status: ' + success);
console.log('setRateConvertCost: ' + setRateConvertCost + 'ms');
logger.info('setRateConvertCost: ' + setRateConvertCost + 'ms');
return resolve(success);
}
});
}
}
} catch (err) {
console.error(err);
logger.error(err);
return reject(err);
}
});
}
// read the cached exchange rates
function getRateConvert() {
let res = {};
try {
myCache.get("currencyRate", function (err, value) {
if (!err) {
if (value == undefined) {
console.log('can not get the currencyRate value');
logger.info('can not get the currencyRate value');
return ({});
} else {
res = value;
console.log('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
logger.info('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
}
}
});
return res;
} catch (err) {
console.log(err);
logger.error(err);
throw err; // getRateConvert is synchronous, so rethrow instead of calling the undefined reject()
}
}
// determine the currency type
function judgeCurrencyFlag(currencyFlag) {
let rateFlag = 'RMB';
if (currencyFlag == 840) rateFlag = 'MY';
else if (currencyFlag == 954) rateFlag = 'OY';
else if (currencyFlag == 392) rateFlag = 'RY';
else if (currencyFlag == 344) rateFlag = 'GY';
else if (currencyFlag == 826) rateFlag = 'YB';
else if (currencyFlag == 124) rateFlag = 'JNDY';
else if (currencyFlag == 36) rateFlag = 'ODLYY';
else if (currencyFlag == 554) rateFlag = 'XXLY';
else if (currencyFlag == 702) rateFlag = 'XJPY';
else if (currencyFlag == 756) rateFlag = 'RSFL';
else if (currencyFlag == 156 || currencyFlag == 0) rateFlag = 'RMB';
return rateFlag;
}
let updateTotalCompany = {
startQueryCompany: async function (flag) {
if (flag) {
try {
//set汇率转换
let covertFlag = await setRateConvert();
let rateValueMap = {};
if (covertFlag == true) {
rateValueMap = getRateConvert();
}
let id = config.updateInfo.companyId;
let i = 1;
let ctx = await transactions.getContext(id);
let fetched = 0;
if (!ctx.last)
ctx.last = 0; //全量更新置0
let resultCount = 0;
let startTime = Date.now();
let updateInfo = {'logInfo': '', 'updateStatus': 0};
let CSVFilePath = '../neo4jDB_update/totalData/companies.csv'; //windows
// writeLineStream第一个参数为ReadStream实例,也可以为文件名
let w = writeLineStream(fs.createWriteStream(CSVFilePath), {
// 换行符,默认\n
newline: '\n',
// 编码器,可以为函数或字符串(内置编码器:json,base64),默认null
encoding: function (data) {
return data;
},
// 缓存的行数,默认为0(表示不缓存),此选项主要用于优化写文件性能,当数量缓存的内容超过该数量时再一次性写入到流中,可以提高写速度
cacheLines: 0
});
// let line1 = 'ITCode2:ID,ITName:string';
let line1 = 'timestamp:string,isPerson:string,ITCode2:ID,RMBFund:float,regFund:float,regFundUnit:string,isExtra:string,surStatus:string,originTable:string,isBranches:string';
w.write(line1);
let originTable = 'tCR0001_V2.0'; //数据来源
do {
let rows = [];
let now = Date.now();
let sql = `
select top 10000 cast(tmstamp as bigint) as _ts, ITCode2,CR0001_005,CR0001_006,CR0001_040,CR0001_041 from [tCR0001_V2.0] WITH(READPAST)
where flag<> 1 and tmstamp > cast( cast(${ctx.last} as bigint) as binary(8)) order by tmstamp;
`;
let res = await Mssql.connect(config.mssql).query(sql);
let queryCost = Date.now() - now;
rows = res.recordset;
fetched = rows.length; //每次查询SQL Server的实际记录数
writeStart = Date.now();
if (fetched > 0) {
resultCount += fetched;
let lines = [];
let codes = [];
for (let i = 0; i < rows.length; i++) {
let rate = null; //汇率标识
let rateValue = 1;
let ITCode = rows[i].ITCode2;
let timestamp = rows[i]._ts;
if (ITCode) {
codes.push(ITCode);
}
if (!ITCode) { //如果ITCode为null,则传入UUID,并在node上的isExtra置1;
ITCode = rows[i]._ts + transactions.createRndNum(6); //产生6位随机数 + timestamp作为ITCode
isExtra = 1; //1代表没有机构代码
}
else {
isExtra = 0;
}
let fund = rows[i].CR0001_005; // registered capital, before conversion
let currencyUnit = rows[i].CR0001_006; // currency type
let
updateTotalCompany.js | 03_009,RE9003_010 from [dbo].[tRE9003] where flag<> 1';
let res = await Mssql.connect(config.mssql_rate).query(sql);
let setRateConvertCost = Date.now() - now;
let rows = res.recordset;
let fetched = rows.length; //每次查询SQL Server的实际记录数
if (fetched > 0) {
for (let i = 0; i < rows.length; i++) {
let MY = rows[i].RE9003_001; // US dollar
let OY = rows[i].RE9003_002; // euro
let RY = rows[i].RE9003_003; // Japanese yen
let GY = rows[i].RE9003_004; // Hong Kong dollar
let YB = rows[i].RE9003_005; // British pound
let JNDY = rows[i].RE9003_006; // Canadian dollar
let ODLYY = rows[i].RE9003_007; // Australian dollar
let XXLY = rows[i].RE9003_008; // New Zealand dollar
let XJPY = rows[i].RE9003_009; // Singapore dollar
let RSFL = rows[i].RE9003_010; // Swiss franc
let obj = {
MY: `${MY}`,
OY: `${OY}`,
RY: `${RY}`,
GY: `${GY}`,
YB: `${YB}`,
JNDY: `${JNDY}`,
ODLYY: `${ODLYY}`,
XXLY: `${XXLY}`,
XJPY: `${XJPY}`,
RSFL: `${RSFL}`
};
myCache.set("currencyRate", obj, function (err, success) {
if (!err && success) {
console.log(success);
logger.info('myCache set currencyRate status: ' + success);
console.log('setRateConvertCost: ' + setRateConvertCost + 'ms');
logger.info('setRateConvertCost: ' + setRateConvertCost + 'ms');
return resolve(success);
}
});
}
}
} catch (err) {
console.error(err);
logger.error(err);
return reject(err);
}
});
}
// read the cached exchange rates
function getRateConvert() {
let res = {};
try {
myCache.get("currencyRate", function (err, value) {
if (!err) {
if (value == undefined) {
console.log('can not get the currencyRate value');
logger.info('can not get the currencyRate value');
return ({});
} else {
res = value;
console.log('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
logger.info('the currencyRate value: ' + 'USD: ' + value.MY + ', EUR: ' + value.OY + ', JPY: ' + value.RY + ', HKD: ' + value.GY + ', GBP: ' + value.YB + ', CAD: ' + value.JNDY + ', AUD: ' + value.ODLYY + ', NZD: ' + value.XXLY + ', SGD: ' + value.XJPY + ', CHF: ' + value.RSFL);
}
}
});
return res;
} catch (err) {
console.log(err);
logger.error(err);
throw err; // getRateConvert is synchronous, so rethrow instead of calling the undefined reject()
}
}
// determine the currency type
function judgeCurrencyFlag(currencyFlag) {
let rateFlag = 'RMB';
if (currencyFlag == 840) rateFlag = 'MY';
else if (currencyFlag == 954) rateFlag = 'OY';
else if (currencyFlag == 392) rateFlag = 'RY';
else if (currencyFlag == 344) rateFlag = 'GY';
else if (currencyFlag == 826) rateFlag = 'YB';
else if (currencyFlag == 124) rateFlag = 'JNDY';
else if (currencyFlag == 36) rateFlag = 'ODLYY';
else if (currencyFlag == 554) rateFlag = 'XXLY';
else if (currencyFlag == 702) rateFlag = 'XJPY';
else if (currencyFlag == 756) rateFlag = 'RSFL';
else if (currencyFlag == 156 || currencyFlag == 0) rateFlag = 'RMB';
return rateFlag;
}
let updateTotalCompany = {
startQueryCompany: async function (flag) {
if (flag) {
try {
//set汇率转换
let covertFlag = await setRateConvert();
let rateValueMap = {};
if (covertFlag == true) {
rateValueMap = getRateConvert();
}
let id = config.updateInfo.companyId;
let i = 1;
let ctx = await transactions.getContext(id);
let fetched = 0;
if (!ctx.last)
ctx.last = 0; //全量更新置0
let resultCount = 0;
let startTime = Date.now();
let updateInfo = {'logInfo': '', 'updateStatus': 0};
let CSVFilePath = '../neo4jDB_update/totalData/companies.csv'; //windows
// writeLineStream第一个参数为ReadStream实例,也可以为文件名
let w = writeLineStream(fs.createWriteStream(CSVFilePath), {
// 换行符,默认\n
newline: '\n',
// 编码器,可以为函数或字符串(内置编码器:json,base64),默认null
encoding: function (data) {
return data;
},
// 缓存的行数,默认为0(表示不缓存),此选项主要用于优化写文件性能,当数量缓存的内容超过该数量时再一次性写入到流中,可以提高写速度
cacheLines: 0
});
// let line1 = 'ITCode2:ID,ITName:string';
let line1 = 'timestamp:string,isPerson:string,ITCode2:ID,RMBFund:float,regFund:float,regFundUnit:string,isExtra:string,surStatus:string,originTable:string,isBranches:string';
w.write(line1);
let originTable = 'tCR0001_V2.0'; //数据来源
| do {
let rows = [];
let now = Date.now();
let sql = `
select top 10000 cast(tmstamp as bigint) as _ts, ITCode2,CR0001_005,CR0001_006,CR0001_040,CR0001_041 from [tCR0001_V2.0] WITH(READPAST)
where flag<> 1 and tmstamp > cast( cast(${ctx.last} as bigint) as binary(8)) order by tmstamp;
`;
let res = await Mssql.connect(config.mssql).query(sql);
let queryCost = Date.now() - now;
rows = res.recordset;
fetched = rows.length; //每次查询SQL Server的实际记录数
writeStart = Date.now();
if (fetched > 0) {
resultCount += fetched;
let lines = [];
let codes = [];
for (let i = 0; i < rows.length; i++) {
let rate = null; //汇率标识
let rateValue = 1;
let ITCode = rows[i].ITCode2;
let timestamp = rows[i]._ts;
if (ITCode) {
codes.push(ITCode);
}
if (!ITCode) { //如果ITCode为null,则传入UUID,并在node上的isExtra置1;
ITCode = rows[i]._ts + transactions.createRndNum(6); //产生6位随机数 + timestamp作为ITCode
isExtra = 1; //1代表没有机构代码
}
else {
isExtra = 0;
}
let fund = rows[i].CR0001_005; // registered capital, before conversion
let currencyUnit = rows[i].CR0001_006; // currency type
let currencyFlag
|
words.go | pector",
"king",
"ladder",
"menu",
"penalty",
"piano",
"potato",
"profession",
"professor",
"quantity",
"reaction",
"requirement",
"salad",
"sister",
"supermarket",
"tongue",
"weakness",
"wedding",
"affair",
"ambition",
"analyst",
"apple",
"assignment",
"assistant",
"bathroom",
"bedroom",
"beer",
"birthday",
"celebration",
"championship",
"cheek",
"client",
"consequence",
"departure",
"diamond",
"dirt",
"ear",
"fortune",
"friendship",
"funeral",
"gene",
"girlfriend",
"hat",
"indication",
"intention",
"lady",
"midnight",
"negotiation",
"obligation",
"passenger",
"pizza",
"platform",
"poet",
"pollution",
"recognition",
"reputation",
"shirt",
"sir",
"speaker",
"stranger",
"surgery",
"sympathy",
"tale",
"throat",
"trainer",
"uncle",
"youth",
"time",
"work",
"film",
"water",
"money",
"example",
"while",
"business",
"study",
"game",
"life",
"form",
"air",
"day",
"place",
"number",
"part",
"field",
"fish",
"back",
"process",
"heat",
"hand",
"experience",
"job",
"book",
"end",
"point",
"type",
"home",
"economy",
"value",
"body",
"market",
"guide",
"interest",
"state",
"radio",
"course",
"company",
"price",
"size",
"card",
"list",
"mind",
"trade",
"line",
"care",
"group",
"risk",
"word",
"fat",
"force",
"key",
"light",
"training",
"name",
"school",
"top",
"amount",
"level",
"order",
"practice",
"research",
"sense",
"service",
"piece",
"web",
"boss",
"sport",
"fun",
"house",
"page",
"term",
"test",
"answer",
"sound",
"focus",
"matter",
"kind",
"soil",
"board",
"oil",
"picture",
"access",
"garden",
"range",
"rate",
"reason",
"future",
"site",
"demand",
"exercise",
"image",
"case",
"cause",
"coast",
"action",
"age",
"bad",
"boat",
"record",
"result",
"section",
"building",
"mouse",
"cash",
"class",
"nothing",
"period",
"plan",
"store",
"tax",
"side",
"subject",
"space",
"rule",
"stock",
"weather",
"chance",
"figure",
"man",
"model",
"source",
"beginning",
"earth",
"program",
"chicken",
"design",
"feature",
"head",
"material",
"purpose",
"question",
"rock",
"salt",
"act",
"birth",
"car",
"dog",
"object",
"scale",
"sun",
"note",
"profit",
"rent",
"speed",
"style",
"war",
"bank",
"craft",
"half",
"inside",
"outside",
"standard",
"bus",
"exchange",
"eye",
"fire",
"position",
"pressure",
"stress",
"advantage",
"benefit",
"box",
"frame",
"issue",
"step",
"cycle",
"face",
"item",
"metal",
"paint",
"review",
"room",
"screen",
"structure",
"view",
"account",
"ball",
"discipline",
"medium",
"share",
"balance",
"bit",
"black",
"bottom",
"choice",
"gift",
"impact",
"machine",
"shape",
"tool",
"wind",
"address",
"average",
"career",
"culture",
"morning",
"pot",
"sign",
"table",
"task",
"condition",
"contact",
"credit",
"egg",
"hope",
"ice",
"network",
"north",
"square",
"attempt",
"date",
"effect",
"link",
"post",
"star",
"voice",
"capital",
"challenge",
"friend",
"self",
"shot",
"brush",
"couple",
"debate",
"exit",
"front",
"function",
"lack",
"living",
"plant",
"plastic",
"spot",
"summer",
"taste",
"theme",
"track",
"wing",
"brain",
"button",
"click",
"desire",
"foot",
"gas",
"influence",
"notice",
"rain",
"wall",
"base",
"damage",
"distance",
"feeling",
"pair",
"savings",
"staff",
"sugar",
"target",
"text",
"animal",
"author",
"budget",
"discount",
"file",
"ground",
"lesson",
"minute",
"officer",
"phase",
"reference",
"register",
"sky",
"stage",
"stick",
"title",
"trouble",
"bowl",
"bridge",
"campaign",
"character",
"club",
"edge",
"evidence",
"fan",
"letter",
"lock",
"maximum",
"novel",
"option",
"pack",
"park",
"plenty",
"quarter",
"skin",
"sort",
"weight",
"baby",
"background",
"carry",
"dish",
"factor",
"fruit",
"glass",
"joint",
"master",
"muscle",
"red",
"strength",
"traffic",
"trip",
"vegetable",
"appeal",
"chart",
"gear",
"ideal",
"kitchen",
"land",
"log",
"mother",
"net",
"party",
"principle",
"relative",
"sale",
"season",
"signal", | "spirit",
"street",
"tree",
"wave", | random_line_split |
|
words.go | () string {
abuff := []string{}
total := 0
words := 0
for {
w := ranWord()
words += 1
total += len(w)
abuff = append(abuff, w)
if total >= 40 && words > 4 {
break
}
}
buff := ""
for _, x := range abuff {
first := x[0:1]
last := x[1:len(x)]
first = strings.ToUpper(first)
buff += first + last
}
return buff
}
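// Example (illustrative): BigWords concatenates random dictionary words in
// CamelCase until the result has at least 40 characters and more than 4 words,
// e.g. "PeopleHistoryWayArtWorldInformation".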
func ranWord() string {
list := []string{
"people",
"history",
"way",
"art",
"world",
"information",
"map",
"two",
"family",
"government",
"health",
"system",
"computer",
"meat",
"year",
"thanks",
"music",
"person",
"reading",
"method",
"data",
"food",
"understanding",
"theory",
"law",
"bird",
"literature",
"problem",
"software",
"control",
"knowledge",
"power",
"ability",
"economics",
"love",
"internet",
"television",
"science",
"library",
"nature",
"fact",
"product",
"idea",
"temperature",
"investment",
"area",
"society",
"activity",
"story",
"industry",
"media",
"thing",
"oven",
"community",
"definition",
"safety",
"quality",
"development",
"language",
"management",
"player",
"variety",
"video",
"week",
"security",
"country",
"exam",
"movie",
"organization",
"equipment",
"physics",
"analysis",
"policy",
"series",
"thought",
"basis",
"boyfriend",
"direction",
"strategy",
"technology",
"army",
"camera",
"freedom",
"paper",
"environment",
"child",
"instance",
"month",
"truth",
"marketing",
"university",
"writing",
"article",
"department",
"difference",
"goal",
"news",
"audience",
"fishing",
"growth",
"income",
"marriage",
"user",
"combination",
"failure",
"meaning",
"medicine",
"philosophy",
"teacher",
"communication",
"night",
"chemistry",
"disease",
"disk",
"energy",
"nation",
"road",
"role",
"soup",
"advertising",
"location",
"success",
"addition",
"apartment",
"education",
"math",
"moment",
"painting",
"politics",
"attention",
"decision",
"event",
"property",
"shopping",
"student",
"wood",
"competition",
"distribution",
"entertainment",
"office",
"population",
"president",
"unit",
"category",
"cigarette",
"context",
"introduction",
"opportunity",
"performance",
"driver",
"flight",
"length",
"magazine",
"newspaper",
"relationship",
"teaching",
"cell",
"dealer",
"finding",
"lake",
"member",
"message",
"phone",
"scene",
"appearance",
"association",
"concept",
"customer",
"death",
"discussion",
"housing",
"inflation",
"insurance",
"mood",
"woman",
"advice",
"blood",
"effort",
"expression",
"importance",
"opinion",
"payment",
"reality",
"responsibility",
"situation",
"skill",
"statement",
"wealth",
"application",
"city",
"county",
"depth",
"estate",
"foundation",
"grandmother",
"heart",
"perspective",
"photo",
"recipe",
"studio",
"topic",
"collection",
"depression",
"imagination",
"passion",
"percentage",
"resource",
"setting",
"ad",
"agency",
"college",
"connection",
"criticism",
"debt",
"description",
"memory",
"patience",
"secretary",
"solution",
"administration",
"aspect",
"attitude",
"director",
"personality",
"psychology",
"recommendation",
"response",
"selection",
"storage",
"version",
"alcohol",
"argument",
"complaint",
"contract",
"emphasis",
"highway",
"loss",
"membership",
"possession",
"preparation",
"steak",
"union",
"agreement",
"cancer",
"currency",
"employment",
"engineering",
"entry",
"interaction",
"mixture",
"preference",
"region",
"republic",
"tradition",
"virus",
"actor",
"classroom",
"delivery",
"device",
"difficulty",
"drama",
"election",
"engine",
"football",
"guidance",
"hotel",
"owner",
"priority",
"protection",
"suggestion",
"tension",
"variation",
"anxiety",
"atmosphere",
"awareness",
"bath",
"bread",
"candidate",
"climate",
"comparison",
"confusion",
"construction",
"elevator",
"emotion",
"employee",
"employer",
"guest",
"height",
"leadership",
"mall",
"manager",
"operation",
"recording",
"sample",
"transportation",
"charity",
"cousin",
"disaster",
"editor",
"efficiency",
"excitement",
"extent",
"feedback",
"guitar",
"homework",
"leader",
"mom",
"outcome",
"permission",
"presentation",
"promotion",
"reflection",
"refrigerator",
"resolution",
"revenue",
"session",
"singer",
"tennis",
"basket",
"bonus",
"cabinet",
"childhood",
"church",
"clothes",
"coffee",
"dinner",
"drawing",
"hair",
"hearing",
"initiative",
"judgment",
"lab",
"measurement",
"mode",
"mud",
"orange",
"poetry",
"police",
"possibility",
"procedure",
"queen",
"ratio",
"relation",
"restaurant",
"satisfaction",
"sector",
"signature",
"significance",
"song",
"tooth",
"town",
"vehicle",
"volume",
"wife",
"accident",
"airport",
"appointment",
"arrival",
"assumption",
"baseball",
"chapter",
"committee",
"conversation",
" | BigWords | identifier_name |
|
words.go | "reading",
"method",
"data",
"food",
"understanding",
"theory",
"law",
"bird",
"literature",
"problem",
"software",
"control",
"knowledge",
"power",
"ability",
"economics",
"love",
"internet",
"television",
"science",
"library",
"nature",
"fact",
"product",
"idea",
"temperature",
"investment",
"area",
"society",
"activity",
"story",
"industry",
"media",
"thing",
"oven",
"community",
"definition",
"safety",
"quality",
"development",
"language",
"management",
"player",
"variety",
"video",
"week",
"security",
"country",
"exam",
"movie",
"organization",
"equipment",
"physics",
"analysis",
"policy",
"series",
"thought",
"basis",
"boyfriend",
"direction",
"strategy",
"technology",
"army",
"camera",
"freedom",
"paper",
"environment",
"child",
"instance",
"month",
"truth",
"marketing",
"university",
"writing",
"article",
"department",
"difference",
"goal",
"news",
"audience",
"fishing",
"growth",
"income",
"marriage",
"user",
"combination",
"failure",
"meaning",
"medicine",
"philosophy",
"teacher",
"communication",
"night",
"chemistry",
"disease",
"disk",
"energy",
"nation",
"road",
"role",
"soup",
"advertising",
"location",
"success",
"addition",
"apartment",
"education",
"math",
"moment",
"painting",
"politics",
"attention",
"decision",
"event",
"property",
"shopping",
"student",
"wood",
"competition",
"distribution",
"entertainment",
"office",
"population",
"president",
"unit",
"category",
"cigarette",
"context",
"introduction",
"opportunity",
"performance",
"driver",
"flight",
"length",
"magazine",
"newspaper",
"relationship",
"teaching",
"cell",
"dealer",
"finding",
"lake",
"member",
"message",
"phone",
"scene",
"appearance",
"association",
"concept",
"customer",
"death",
"discussion",
"housing",
"inflation",
"insurance",
"mood",
"woman",
"advice",
"blood",
"effort",
"expression",
"importance",
"opinion",
"payment",
"reality",
"responsibility",
"situation",
"skill",
"statement",
"wealth",
"application",
"city",
"county",
"depth",
"estate",
"foundation",
"grandmother",
"heart",
"perspective",
"photo",
"recipe",
"studio",
"topic",
"collection",
"depression",
"imagination",
"passion",
"percentage",
"resource",
"setting",
"ad",
"agency",
"college",
"connection",
"criticism",
"debt",
"description",
"memory",
"patience",
"secretary",
"solution",
"administration",
"aspect",
"attitude",
"director",
"personality",
"psychology",
"recommendation",
"response",
"selection",
"storage",
"version",
"alcohol",
"argument",
"complaint",
"contract",
"emphasis",
"highway",
"loss",
"membership",
"possession",
"preparation",
"steak",
"union",
"agreement",
"cancer",
"currency",
"employment",
"engineering",
"entry",
"interaction",
"mixture",
"preference",
"region",
"republic",
"tradition",
"virus",
"actor",
"classroom",
"delivery",
"device",
"difficulty",
"drama",
"election",
"engine",
"football",
"guidance",
"hotel",
"owner",
"priority",
"protection",
"suggestion",
"tension",
"variation",
"anxiety",
"atmosphere",
"awareness",
"bath",
"bread",
"candidate",
"climate",
"comparison",
"confusion",
"construction",
"elevator",
"emotion",
"employee",
"employer",
"guest",
"height",
"leadership",
"mall",
"manager",
"operation",
"recording",
"sample",
"transportation",
"charity",
"cousin",
"disaster",
"editor",
"efficiency",
"excitement",
"extent",
"feedback",
"guitar",
"homework",
"leader",
"mom",
"outcome",
"permission",
"presentation",
"promotion",
"reflection",
"refrigerator",
"resolution",
"revenue",
"session",
"singer",
"tennis",
"basket",
"bonus",
"cabinet",
"childhood",
"church",
"clothes",
"coffee",
"dinner",
"drawing",
"hair",
"hearing",
"initiative",
"judgment",
"lab",
"measurement",
"mode",
"mud",
"orange",
"poetry",
"police",
"possibility",
"procedure",
"queen",
"ratio",
"relation",
"restaurant",
"satisfaction",
"sector",
"signature",
"significance",
"song",
"tooth",
"town",
"vehicle",
"volume",
"wife",
"accident",
"airport",
"appointment",
"arrival",
"assumption",
"baseball",
"chapter",
"committee",
"conversation",
"database",
"enthusiasm",
"error",
"explanation",
"farmer",
"gate",
"girl",
"hall",
"historian",
"hospital",
"injury",
"instruction",
"maintenance",
"manufacturer",
"meal",
"perception",
"pie",
"poem",
"presence",
"proposal",
"reception",
"replacement",
"revolution",
"river",
"son",
| {
list := []string{
"people",
"history",
"way",
"art",
"world",
"information",
"map",
"two",
"family",
"government",
"health",
"system",
"computer",
"meat",
"year",
"thanks",
"music",
"person", | identifier_body |
|
words.go |
return buff
}
func ranWord() string {
list := []string{
"people",
"history",
"way",
"art",
"world",
"information",
"map",
"two",
"family",
"government",
"health",
"system",
"computer",
"meat",
"year",
"thanks",
"music",
"person",
"reading",
"method",
"data",
"food",
"understanding",
"theory",
"law",
"bird",
"literature",
"problem",
"software",
"control",
"knowledge",
"power",
"ability",
"economics",
"love",
"internet",
"television",
"science",
"library",
"nature",
"fact",
"product",
"idea",
"temperature",
"investment",
"area",
"society",
"activity",
"story",
"industry",
"media",
"thing",
"oven",
"community",
"definition",
"safety",
"quality",
"development",
"language",
"management",
"player",
"variety",
"video",
"week",
"security",
"country",
"exam",
"movie",
"organization",
"equipment",
"physics",
"analysis",
"policy",
"series",
"thought",
"basis",
"boyfriend",
"direction",
"strategy",
"technology",
"army",
"camera",
"freedom",
"paper",
"environment",
"child",
"instance",
"month",
"truth",
"marketing",
"university",
"writing",
"article",
"department",
"difference",
"goal",
"news",
"audience",
"fishing",
"growth",
"income",
"marriage",
"user",
"combination",
"failure",
"meaning",
"medicine",
"philosophy",
"teacher",
"communication",
"night",
"chemistry",
"disease",
"disk",
"energy",
"nation",
"road",
"role",
"soup",
"advertising",
"location",
"success",
"addition",
"apartment",
"education",
"math",
"moment",
"painting",
"politics",
"attention",
"decision",
"event",
"property",
"shopping",
"student",
"wood",
"competition",
"distribution",
"entertainment",
"office",
"population",
"president",
"unit",
"category",
"cigarette",
"context",
"introduction",
"opportunity",
"performance",
"driver",
"flight",
"length",
"magazine",
"newspaper",
"relationship",
"teaching",
"cell",
"dealer",
"finding",
"lake",
"member",
"message",
"phone",
"scene",
"appearance",
"association",
"concept",
"customer",
"death",
"discussion",
"housing",
"inflation",
"insurance",
"mood",
"woman",
"advice",
"blood",
"effort",
"expression",
"importance",
"opinion",
"payment",
"reality",
"responsibility",
"situation",
"skill",
"statement",
"wealth",
"application",
"city",
"county",
"depth",
"estate",
"foundation",
"grandmother",
"heart",
"perspective",
"photo",
"recipe",
"studio",
"topic",
"collection",
"depression",
"imagination",
"passion",
"percentage",
"resource",
"setting",
"ad",
"agency",
"college",
"connection",
"criticism",
"debt",
"description",
"memory",
"patience",
"secretary",
"solution",
"administration",
"aspect",
"attitude",
"director",
"personality",
"psychology",
"recommendation",
"response",
"selection",
"storage",
"version",
"alcohol",
"argument",
"complaint",
"contract",
"emphasis",
"highway",
"loss",
"membership",
"possession",
"preparation",
"steak",
"union",
"agreement",
"cancer",
"currency",
"employment",
"engineering",
"entry",
"interaction",
"mixture",
"preference",
"region",
"republic",
"tradition",
"virus",
"actor",
"classroom",
"delivery",
"device",
"difficulty",
"drama",
"election",
"engine",
"football",
"guidance",
"hotel",
"owner",
"priority",
"protection",
"suggestion",
"tension",
"variation",
"anxiety",
"atmosphere",
"awareness",
"bath",
"bread",
"candidate",
"climate",
"comparison",
"confusion",
"construction",
"elevator",
"emotion",
"employee",
"employer",
"guest",
"height",
"leadership",
"mall",
"manager",
"operation",
"recording",
"sample",
"transportation",
"charity",
"cousin",
"disaster",
"editor",
"efficiency",
"excitement",
"extent",
"feedback",
"guitar",
"homework",
"leader",
"mom",
"outcome",
"permission",
"presentation",
"promotion",
"reflection",
"refrigerator",
"resolution",
"revenue",
"session",
"singer",
"tennis",
"basket",
"bonus",
"cabinet",
"childhood",
"church",
"clothes",
"coffee",
"dinner",
"drawing",
"hair",
"hearing",
"initiative",
"judgment",
"lab",
"measurement",
"mode",
"mud",
"orange",
"poetry",
"police",
"possibility",
"procedure",
"queen",
"ratio",
"relation",
"restaurant",
"satisfaction",
"sector",
"signature",
"significance",
"song",
"tooth",
"town",
"vehicle",
"volume",
"wife",
"accident",
"airport",
"appointment",
"arrival",
"assumption",
"baseball",
"chapter",
"committee",
"conversation",
"database",
"enthusiasm",
"error",
"explanation",
"farmer",
"gate",
"girl",
"hall",
"historian",
"hospital",
"injury",
"instruction",
"maintenance",
"manufacturer",
"meal",
"perception",
{
first := x[0:1]
last := x[1:len(x)]
first = strings.ToUpper(first)
buff += first + last
}
|
codec.py | shapes = [(min_shape[0] * (np.power(2, i)), min_shape[1] * (np.power(2, i))) for i in range(0, levels + 1)]
return shapes
def wavelet_decoded_length(min_shape, max_shape):
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
length = functools.reduce(lambda agg, s: agg + (3 * (s[0] * s[1])), shapes, 0)
length += (min_shape[0] * min_shape[1])
return length
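# Worked example (illustrative, not in the original): with min_shape (8, 8) and
# max_shape (32, 32) there are int(sqrt(32 // 8)) = 2 levels, so the subband
# shapes are (8, 8), (16, 16) and (32, 32), and the decoded length is
# 8*8 + 3*(8*8 + 16*16 + 32*32) = 64 + 3*1344 = 4096 coefficients.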
def wavelet_decode(hic: hic.HicImage) -> model.CompressedImage:
utils.debug_msg("Wavelet decode")
assert hic.hic_type == model.Compression.HIC
payloads = hic.payloads
utils.debug_msg("Decoding Huffman trees")
value_huffs = {
"lum": huffman_decode(payloads[0]),
"cr": huffman_decode(payloads[1]),
"cb": huffman_decode(payloads[2])
}
length_huffs = {
"lum": huffman_decode(payloads[3]),
"cr": huffman_decode(payloads[4]),
"cb": huffman_decode(payloads[5])
}
utils.debug_msg("Decode RLE values")
value_comps = {
"lum": huffman_data_decode(payloads[6], value_huffs["lum"]),
"cr": huffman_data_decode(payloads[7], value_huffs["cr"]),
"cb": huffman_data_decode(payloads[8], value_huffs["cb"]),
}
utils.debug_msg("Decode RLE lengths")
length_comps = {
"lum": huffman_data_decode(payloads[9], length_huffs["lum"]),
"cr": huffman_data_decode(payloads[10], length_huffs["cr"]),
"cb": huffman_data_decode(payloads[11], length_huffs["cb"]),
}
min_shape = payloads[12].numbers
max_shape = payloads[13].numbers
utils.debug_msg("Unloaded all of the data")
# ====
rles = utils.dict_map(value_comps,
lambda k, v: [RunLength(value=t[1], length=t[0]) for t in list(zip(length_comps[k], v))])
length = wavelet_decoded_length(min_shape, max_shape)
data = utils.dict_map(rles, lambda _, v: decode_run_length(v, length))
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
channels = utils.dict_map(data, lambda _, v: wavelet_decode_pull_subbands(v, shapes))
return model.CompressedImage.from_dict(channels)
def huffman_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman in payload
"""
leaves = huff.encode_table()
return hic.PayloadStringP(hic.TupP, [hic.TupP(t[0], t[1]) for t in leaves])
def huffman_decode(data: hic.PayloadStringP) -> huffman.HuffmanTree:
"""
Decode huffman from payload
"""
number_string = data.payloads
leaves = [p.numbers for p in number_string]
return huffman.HuffmanTree.construct_from_coding(leaves)
def huffman_data_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman data into payload
"""
data = huff.encode_data()
return hic.BitStringP(data)
def huffman_data_decode(data: hic.BitStringP, huffman: huffman.HuffmanTree) -> list:
"""
Decode huffman data from payload with huffman tree
"""
return huffman.decode_data(data.payload)
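# Round-trip sketch (assumes the huffman module's API exactly as used above):
# tree = huffman.HuffmanTree.construct_from_data([1, 1, 2, 3])
# same_tree = huffman_decode(huffman_encode(tree))
# assert huffman_data_decode(huffman_data_encode(tree), same_tree) == [1, 1, 2, 3]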
def jpeg_encode(compressed: model.CompressedImage) -> hic.HicImage:
"""
Generally follow JPEG encoding. Since for the wavelet work I don't have a standard Huffman tree to work with
I might as well be consistent between the two implementations and just encode the entire array with custom
Huffman trees. To attempt to be honest with the implementation though, I'll still treat the DC components
separately by doing the differences and again applying a custom Huffman. A main feature of DCT on each block is the
meaning of the DC component.
For RL it's also easier implementation-wise to split up the length from the value and not try to optimize and weave
them together. Yes, the encoding will suffer bloat, but we are trying to highlight the transforms anyway.
"""
utils.debug_msg("Starting JPEG encoding")
dc_comps = utils.dict_map(compressed.as_dict,
lambda _, v: differential_coding(transform.split_matrix(v, settings.JPEG_BLOCK_SIZE)))
utils.debug_msg("Determined differences DC components")
def ac_comp_fun(k, v):
utils.debug_msg("Determining AC components for: " + k)
splits = transform.split_matrix(v, settings.JPEG_BLOCK_SIZE)
acs = transform.ac_components(splits)
utils.debug_msg("Calculating RLE for: " + k)
out = run_length_coding(acs)
return out
# on each transformed channel, run RLE on the AC components of each block
ac_comps = utils.dict_map(compressed.as_dict, ac_comp_fun)
utils.debug_msg("Determined RLEs for AC components")
dc_huffs = utils.dict_map(dc_comps, lambda _, v: huffman.HuffmanTree.construct_from_data(v))
ac_value_huffs = utils.dict_map(ac_comps,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda s: s.value))
ac_length_huffs = utils.dict_map(ac_comps,
lambda _, v: huffman.HuffmanTree.construct_from_data(v,
key_func=lambda s: s.length))
def encode_huff(d):
huffs = [t[1] for t in d.items()]
return [huffman_encode(h) for h in huffs]
def encode_data(d):
huffs = [t[1] for t in d.items()]
return [huffman_data_encode(h) for h in huffs]
payloads = utils.flatten([
encode_huff(dc_huffs),
encode_huff(ac_value_huffs),
encode_huff(ac_length_huffs),
encode_data(dc_huffs),
encode_data(ac_value_huffs),
encode_data(ac_length_huffs),
[
hic.TupP(compressed.shape[0][0], compressed.shape[0][1]),
hic.TupP(compressed.shape[1][0], compressed.shape[1][1])
]
])
return hic.HicImage.jpeg_image(payloads)
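# Usage sketch (hypothetical variable name, mirroring wavelet_encode below):
# given a model.CompressedImage `ci` holding quantized DCT channels,
# hic_image = jpeg_encode(ci) produces payloads that jpeg_decode below reverses.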
def jpeg_decode(hic: hic.HicImage) -> model.CompressedImage:
"""
Reverse jpeg_encode()
payloads = utils.flatten([
encode_huff(dc_huffs),
encode_huff(ac_value_huffs),
encode_huff(ac_length_huffs),
encode_data(dc_huffs),
encode_data(ac_value_huffs),
encode_data(ac_length_huffs)
])
"""
utils.debug_msg("JPEG decode")
assert hic.hic_type == model.Compression.JPEG
payloads = hic.payloads
utils.debug_msg("Decoding Huffman trees")
dc_huffs = {
"lum": huffman_decode(payloads[0]),
"cr": huffman_decode(payloads[1]),
"cb": huffman_decode(payloads[2])
}
ac_value_huffs = {
"lum": huffman_decode(payloads[3]),
"cr": huffman_decode(payloads[4]),
"cb": huffman_decode(payloads[5])
}
ac_length_huffs = {
"lum": huffman_decode(payloads[6]),
"cr": huffman_decode(payloads[7]),
"cb": huffman_decode(payloads[8])
}
utils.debug_msg("Decode DC differences")
dc_comps = {
"lum": huffman_data_decode(payloads[9], dc_huffs["lum"]),
"cr": huffman_data_decode(payloads[10], dc_huffs["cr"]),
"cb": huffman_data_decode(payloads[11], dc_huffs["cb"]),
}
utils.debug_msg("Decode RLE values")
ac_values = {
"lum": huffman_data_decode(payloads[12], ac_value_huffs["lum"]),
"cr": huffman_data_decode(payloads[13], ac_value_huffs["cr"]),
"cb": huffman_data_decode(payloads[14], ac_value_huffs["cb"]),
}
utils.debug_msg("Decode RLE lengths")
ac_lengths = {
"lum": huffman_data_decode(payloads[15], ac_length_huffs["lum"]),
"cr": huffman_data_decode(payloads[16], ac_length_huffs["cr"]),
"cb": huffman_data_decode(payloads[17], ac_length_huffs["cb"]),
}
shapes = {
"lum": payloads[18].numbers,
"cr": payloads[19].numbers,
"cb": payloads[19].numbers
}
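# Note: jpeg_encode stores only two shape tuples (payloads 18 and 19), so cr and
# cb intentionally read the same payload; both chroma channels share one shape.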
utils.debug_msg("Unloaded all of the data")
# ====
sub_length = utils.size(settings.JPEG_BLOCK_SHAPE()) - 1
utils.debug_msg("Calculating AC RLEs")
ac_rle = utils.dict_map(ac_values,
lambda k, v: [RunLength(t[1], t[0]) for t in list(zip(ac_lengths[k], v))])
def ac_mat_fun
|
codec.py |
utils.debug_msg("Going to determine RLE for %d size array" % len(arr))
rl = functools.reduce(reduction, arr, [{"zeros": 0}])
utils.debug_msg("%d long RLE created" % len(rl))
# If the last element has no value then it was 0! That is a special tuple, (0,0)
if "value" not in rl[-1]:
rl[-1] = {"zeros": 0, "value": 0}
# the goal of RLE in the case of compression is to contain the first symbol (length, size) within a byte
# so if the length is too long, then we need to break it up
if max_len is not None:
utils.debug_msg("Breaking up RLE lengths that are larger than %d" % max_len)
rl = [_break_up_rle(code, max_len) for code in rl]
rl = utils.flatten(rl)
utils.debug_msg("Make RLE objects")
return [RunLength.from_dict(r) for r in rl]
def decode_run_length(rles: List[RunLength], length: int):
arr = []
for (i, d) in enumerate(rles):
arr.append(d.segment)
arr = utils.flatten(arr)
# arr = utils.flatten([d.segment for d in rles])
if rles[-1].is_trailing:
arr += [0] * (length - len(arr))  # pad the implicit trailing zeros out to the full length
return arr
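# Round-trip sketch (illustrative): for xs = [0, 0, 5, 1, 0, 0, 0],
# run_length_coding(xs) yields (zeros=2, value=5), (zeros=0, value=1) and the
# trailing (0, 0) marker, and decode_run_length(run_length_coding(xs), len(xs))
# reproduces xs once the padding above restores the trailing zeros.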
def wavelet_encode(compressed: model.CompressedImage):
"""
From a brief reading of the literature, Huffman coding is still used for wavelet image compression. There are
more effective (and more complicated) schemes that I think are out of scope of this project, which is just to
introduce the concepts.
"""
def collapse_subbands(k, v):
out = [transform.zigzag(l) for l in v]
out = utils.flatten(out)
return out
utils.debug_msg("Starting Wavelet encoding")
lin_subbands = utils.dict_map(compressed.as_dict, collapse_subbands)
utils.debug_msg("Have completed linearizing the subbands")
rles = utils.dict_map(lin_subbands, lambda _, v: run_length_coding(v))
utils.debug_msg("Have completed the run length encodings")
values_huffs = utils.dict_map(rles,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda t: t.value))
length_huffs = utils.dict_map(rles,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda t: t.length))
utils.debug_msg("Huffman trees are constructed")
def encode_huff(d):
huffs = [t[1] for t in d.items()]
return [huffman_encode(h) for h in huffs]
def encode_data(d):
huffs = [t[1] for t in d.items()]
return [huffman_data_encode(h) for h in huffs]
smallest = compressed.luminance_component[0].shape
biggest = compressed.luminance_component[-1].shape
payloads = utils.flatten([
encode_huff(values_huffs),
encode_huff(length_huffs),
encode_data(values_huffs),
encode_data(length_huffs),
[
hic.TupP(smallest[0], smallest[1]),
hic.TupP(biggest[0], biggest[1])
]
])
return hic.HicImage.wavelet_image(payloads)
def wavelet_decode_pull_subbands(data, shapes):
offset = utils.size(shapes[0])
subbands = [transform.izigzag(np.array(data[:offset]), shapes[0])]
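# Presumably each decomposition level carries three detail subbands, hence the
# three izigzag/append steps per shape in the loop below.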
for i in range(len(shapes)):
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
return subbands
def wavelet_decoded_subbands_shapes(min_shape, max_shape):
"""
We just do Haar or Daubechies wavelets; assume dimensions are a power of 2
"""
levels = int(np.sqrt(max_shape[0] // min_shape[0]))
shapes = [(min_shape[0] * (np.power(2, i)), min_shape[1] * (np.power(2, i))) for i in range(0, levels + 1)]
return shapes
def wavelet_decoded_length(min_shape, max_shape):
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
length = functools.reduce(lambda agg, s: agg + (3 * (s[0] * s[1])), shapes, 0)
length += (min_shape[0] * min_shape[1])
return length
def wavelet_decode(hic: hic.HicImage) -> model.CompressedImage:
utils.debug_msg("Wavelet decode")
assert hic.hic_type == model.Compression.HIC
payloads = hic.payloads
utils.debug_msg("Decoding Huffman trees")
value_huffs = {
"lum": huffman_decode(payloads[0]),
"cr": huffman_decode(payloads[1]),
"cb": huffman_decode(payloads[2])
}
length_huffs = {
"lum": huffman_decode(payloads[3]),
"cr": huffman_decode(payloads[4]),
"cb": huffman_decode(payloads[5])
}
utils.debug_msg("Decode RLE values")
value_comps = {
"lum": huffman_data_decode(payloads[6], value_huffs["lum"]),
"cr": huffman_data_decode(payloads[7], value_huffs["cr"]),
"cb": huffman_data_decode(payloads[8], value_huffs["cb"]),
}
utils.debug_msg("Decode RLE lengths")
length_comps = {
"lum": huffman_data_decode(payloads[9], length_huffs["lum"]),
"cr": huffman_data_decode(payloads[10], length_huffs["cr"]),
"cb": huffman_data_decode(payloads[11], length_huffs["cb"]),
}
min_shape = payloads[12].numbers
max_shape = payloads[13].numbers
utils.debug_msg("Unloaded all of the data")
# ====
rles = utils.dict_map(value_comps,
lambda k, v: [RunLength(value=t[1], length=t[0]) for t in list(zip(length_comps[k], v))])
length = wavelet_decoded_length(min_shape, max_shape)
data = utils.dict_map(rles, lambda _, v: decode_run_length(v, length))
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
channels = utils.dict_map(data, lambda _, v: wavelet_decode_pull_subbands(v, shapes))
return model.CompressedImage.from_dict(channels)
def huffman_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman in payload
"""
leaves = huff.encode_table()
return hic.PayloadStringP(hic.TupP, [hic.TupP(t[0], t[1]) for t in leaves])
def huffman_decode(data: hic.PayloadStringP) -> huffman.HuffmanTree:
"""
Decode huffman from payload
"""
number_string = data.payloads
leaves = [p.numbers for p in number_string]
return huffman.HuffmanTree.construct_from_coding(leaves)
def huffman_data_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman data into payload
"""
data = huff.encode_data()
return hic.BitStringP(data)
def huffman_data_decode(data: hic.BitStringP, huffman: huffman.HuffmanTree) -> list:
"""
Decode huffman data from payload with huffman tree
"""
return huffman.decode_data(data.payload)
def jpeg_encode(compressed: model.CompressedImage) -> hic.HicImage:
"""
Generally follow JPEG encoding. Since for the wavelet work I don't have a standard Huffman tree to work with
I might as well be consistent between the two implementations and just encode the entire array with custom
Huffman trees. To attempt to be honest with the implementation though, I'll still treat the DC components
separately by doing the differences and again applying a custom Huffman. A main feature of DCT on each block is the
meaning of the DC component.
For RL it's also easier implementation-wise to split up the length from the value and not try to optimize and weave
them together. Yes, the encoding will suffer bloat, but we are trying to highlight the transforms anyway.
"""
utils.debug_msg("Starting JPEG encoding")
dc_comps = utils.dict_map(compressed.as_dict,
lambda _, v: differential_coding(transform.split_matrix(v, settings.JPEG_BLOCK_SIZE)))
utils.debug_msg("Determined differences DC components")
def ac_comp_fun(k, v):
utils.debug_msg("Determining AC components for: " + k)
splits = transform.split_matrix(v, settings.JPEG | fill = length - len(arr)
arr += ([0] * fill) | conditional_block |
codec.py | l = code["zeros"]
div = l // max_len
full = {
"zeros": max_len - 1, # minus 1 because we get another for free from the value
"value": 0
}
return ([full] * div) + [{
"zeros": l - (div * max_len),
"value": code["value"]
}]
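# Worked example: with max_len=15, {"zeros": 40, "value": 7} splits into
# [{"zeros": 14, "value": 0}, {"zeros": 14, "value": 0}, {"zeros": 10, "value": 7}],
# since each full chunk encodes 15 zeros (14 plus the free one from its 0 value).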
def reduction(agg, next):
if "value" in agg[-1]:
agg.append({"zeros": 0})
if next == 0:
agg[-1]["zeros"] += 1
return agg
if "value" not in agg[-1]:
agg[-1]["value"] = next
return agg
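# Worked example: functools.reduce(reduction, [0, 0, 5, 0, 3], [{"zeros": 0}])
# yields [{"zeros": 2, "value": 5}, {"zeros": 1, "value": 3}].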
utils.debug_msg("Going to determine RLE for %d size array" % len(arr))
rl = functools.reduce(reduction, arr, [{"zeros": 0}])
utils.debug_msg("%d long RLE created" % len(rl))
# If the last element has no value then it was 0! That is a special tuple, (0,0)
if "value" not in rl[-1]:
rl[-1] = {"zeros": 0, "value": 0}
# the goal of RLE in the case of compression is to contain the first symbol (length, size) within a byte
# so if the length is too long, then we need to break it up
if max_len is not None:
utils.debug_msg("Breaking up RLE lengths that are larger than %d" % max_len)
rl = [_break_up_rle(code, max_len) for code in rl]
rl = utils.flatten(rl)
utils.debug_msg("Make RLE objects")
return [RunLength.from_dict(r) for r in rl]
def decode_run_length(rles: List[RunLength], length: int):
arr = []
for (i, d) in enumerate(rles):
arr.append(d.segment)
arr = utils.flatten(arr)
# arr = utils.flatten([d.segment for d in rles])
if rles[-1].is_trailing:
fill = length - len(arr)
arr += ([0] * fill)
return arr
def wavelet_encode(compressed: model.CompressedImage):
"""
In a brief reading of the literature, Huffman coding is still considered for wavelet image compression. There are other
more effective (and complicated schemes) that I think are out of scope of this project which is just to introduce
the concepts.
"""
def collapse_subbands(k, v):
out = [transform.zigzag(l) for l in v]
out = utils.flatten(out)
return out
utils.debug_msg("Starting Wavelet encoding")
lin_subbands = utils.dict_map(compressed.as_dict, collapse_subbands)
utils.debug_msg("Have completed linearizing the subbands")
rles = utils.dict_map(lin_subbands, lambda _, v: run_length_coding(v))
utils.debug_msg("Have completed the run length encodings")
values_huffs = utils.dict_map(rles,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda t: t.value))
length_huffs = utils.dict_map(rles,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda t: t.length))
utils.debug_msg("Huffman trees are constructed")
def encode_huff(d):
huffs = [t[1] for t in d.items()]
return [huffman_encode(h) for h in huffs]
def encode_data(d):
huffs = [t[1] for t in d.items()]
return [huffman_data_encode(h) for h in huffs]
smallest = compressed.luminance_component[0].shape
biggest = compressed.luminance_component[-1].shape
payloads = utils.flatten([
encode_huff(values_huffs),
encode_huff(length_huffs),
encode_data(values_huffs),
encode_data(length_huffs),
[
hic.TupP(smallest[0], smallest[1]),
hic.TupP(biggest[0], biggest[1])
]
])
return hic.HicImage.wavelet_image(payloads)
def wavelet_decode_pull_subbands(data, shapes):
offset = utils.size(shapes[0])
subbands = [transform.izigzag(np.array(data[:offset]), shapes[0])]
for i in range(len(shapes)):
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
return subbands
def wavelet_decoded_subbands_shapes(min_shape, max_shape):
"""
We just do Haar or Daubechies wavelets; assume dimensions are a power of 2
"""
levels = int(np.sqrt(max_shape[0] // min_shape[0]))
shapes = [(min_shape[0] * (np.power(2, i)), min_shape[1] * (np.power(2, i))) for i in range(0, levels + 1)]
return shapes
def wavelet_decoded_length(min_shape, max_shape):
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
length = functools.reduce(lambda agg, s: agg + (3 * (s[0] * s[1])), shapes, 0)
length += (min_shape[0] * min_shape[1])
return length
def wavelet_decode(hic: hic.HicImage) -> model.CompressedImage:
utils.debug_msg("Wavelet decode")
assert hic.hic_type == model.Compression.HIC
payloads = hic.payloads
utils.debug_msg("Decoding Huffman trees")
value_huffs = {
"lum": huffman_decode(payloads[0]),
"cr": huffman_decode(payloads[1]),
"cb": huffman_decode(payloads[2])
}
length_huffs = {
"lum": huffman_decode(payloads[3]),
"cr": huffman_decode(payloads[4]),
"cb": huffman_decode(payloads[5])
}
utils.debug_msg("Decode RLE values")
value_comps = {
"lum": huffman_data_decode(payloads[6], value_huffs["lum"]),
"cr": huffman_data_decode(payloads[7], value_huffs["cr"]),
"cb": huffman_data_decode(payloads[8], value_huffs["cb"]),
}
utils.debug_msg("Decode RLE lengths")
length_comps = {
"lum": huffman_data_decode(payloads[9], length_huffs["lum"]),
"cr": huffman_data_decode(payloads[10], length_huffs["cr"]),
"cb": huffman_data_decode(payloads[11], length_huffs["cb"]),
}
min_shape = payloads[12].numbers
max_shape = payloads[13].numbers
utils.debug_msg("Unloaded all of the data")
# ====
rles = utils.dict_map(value_comps,
lambda k, v: [RunLength(value=t[1], length=t[0]) for t in list(zip(length_comps[k], v))])
length = wavelet_decoded_length(min_shape, max_shape)
data = utils.dict_map(rles, lambda _, v: decode_run_length(v, length))
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
channels = utils.dict_map(data, lambda _, v: wavelet_decode_pull_subbands(v, shapes))
return model.CompressedImage.from_dict(channels)
def huffman_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman in payload
"""
leaves = huff.encode_table()
return hic.PayloadStringP(hic.TupP, [hic.TupP(t[0], t[1]) for t in leaves])
def huffman_decode(data: hic.PayloadStringP) -> huffman.HuffmanTree:
"""
Decode huffman from payload
"""
number_string = data.payloads
leaves = [p.numbers for p in number_string]
return huffman.HuffmanTree.construct_from_coding(leaves)
def huffman_data_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman data into payload
"""
data = huff.encode_data()
return hic.BitStringP(data)
def huffman_data_decode(data: hic.BitStringP, huffman: huffman.HuffmanTree) -> list:
"""
Decode huffman data from payload with huffman tree
"""
return huffman.decode_data(data.payload)
def jpeg_encode(compressed: model.CompressedImage) -> hic.HicImage:
"""
Generally follow JPEG encoding. Since for the wavelet work I don't have a standard Huffman tree to work with
I might as well be consistent between the two implementations and just encode the entire array with custom
Huffman trees. To | """
Come up with the run length encoding for a matrix
"""
def _break_up_rle(code, max_len): | random_line_split |
|
codec.py |
utils.debug_msg("Going to determine RLE for %d size array" % len(arr))
rl = functools.reduce(reduction, arr, [{"zeros": 0}])
utils.debug_msg("%d long RLE created" % len(rl))
# If the last element has no value then it was 0! That is a special tuple, (0,0)
if "value" not in rl[-1]:
rl[-1] = {"zeros": 0, "value": 0}
# the goal of RLE in the case of compression is to contain the first symbol (length, size) within a byte
# so if the length is too long, then we need to break it up
if max_len is not None:
utils.debug_msg("Breaking up RLE lengths that are larger than %d" % max_len)
rl = [_break_up_rle(code, max_len) for code in rl]
rl = utils.flatten(rl)
utils.debug_msg("Make RLE objects")
return [RunLength.from_dict(r) for r in rl]
def decode_run_length(rles: List[RunLength], length: int):
arr = []
for (i, d) in enumerate(rles):
arr.append(d.segment)
arr = utils.flatten(arr)
# arr = utils.flatten([d.segment for d in rles])
if rles[-1].is_trailing:
fill = length - len(arr)
arr += ([0] * fill)
return arr
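# Contract sketch (RunLength internals assumed): each entry expands to its run of
# zeros followed by its value, and a trailing (0, 0) marker zero-fills the rest of
# the `length`-sized array via the fill branch above.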
def wavelet_encode(compressed: model.CompressedImage):
"""
In a brief reading of the literature, Huffman coding is still considered for wavelet image compression. There are other
more effective (and complicated schemes) that I think are out of scope of this project which is just to introduce
the concepts.
"""
def collapse_subbands(k, v):
out = [transform.zigzag(l) for l in v]
out = utils.flatten(out)
return out
utils.debug_msg("Starting Wavelet encoding")
lin_subbands = utils.dict_map(compressed.as_dict, collapse_subbands)
utils.debug_msg("Have completed linearizing the subbands")
rles = utils.dict_map(lin_subbands, lambda _, v: run_length_coding(v))
utils.debug_msg("Have completed the run length encodings")
values_huffs = utils.dict_map(rles,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda t: t.value))
length_huffs = utils.dict_map(rles,
lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda t: t.length))
utils.debug_msg("Huffman trees are constructed")
def encode_huff(d):
huffs = [t[1] for t in d.items()]
return [huffman_encode(h) for h in huffs]
def encode_data(d):
huffs = [t[1] for t in d.items()]
return [huffman_data_encode(h) for h in huffs]
smallest = compressed.luminance_component[0].shape
biggest = compressed.luminance_component[-1].shape
payloads = utils.flatten([
encode_huff(values_huffs),
encode_huff(length_huffs),
encode_data(values_huffs),
encode_data(length_huffs),
[
hic.TupP(smallest[0], smallest[1]),
hic.TupP(biggest[0], biggest[1])
]
])
return hic.HicImage.wavelet_image(payloads)
def wavelet_decode_pull_subbands(data, shapes):
|
def wavelet_decoded_subbands_shapes(min_shape, max_shape):
"""
We just do Haar or Daubechies wavelets; assume dimensions are a power of 2
"""
levels = int(np.sqrt(max_shape[0] // min_shape[0]))
shapes = [(min_shape[0] * (np.power(2, i)), min_shape[1] * (np.power(2, i))) for i in range(0, levels + 1)]
return shapes
def wavelet_decoded_length(min_shape, max_shape):
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
length = functools.reduce(lambda agg, s: agg + (3 * (s[0] * s[1])), shapes, 0)
length += (min_shape[0] * min_shape[1])
return length
def wavelet_decode(hic: hic.HicImage) -> model.CompressedImage:
utils.debug_msg("Wavelet decode")
assert hic.hic_type == model.Compression.HIC
payloads = hic.payloads
utils.debug_msg("Decoding Huffman trees")
value_huffs = {
"lum": huffman_decode(payloads[0]),
"cr": huffman_decode(payloads[1]),
"cb": huffman_decode(payloads[2])
}
length_huffs = {
"lum": huffman_decode(payloads[3]),
"cr": huffman_decode(payloads[4]),
"cb": huffman_decode(payloads[5])
}
utils.debug_msg("Decode RLE values")
value_comps = {
"lum": huffman_data_decode(payloads[6], value_huffs["lum"]),
"cr": huffman_data_decode(payloads[7], value_huffs["cr"]),
"cb": huffman_data_decode(payloads[8], value_huffs["cb"]),
}
utils.debug_msg("Decode RLE lengths")
length_comps = {
"lum": huffman_data_decode(payloads[9], length_huffs["lum"]),
"cr": huffman_data_decode(payloads[10], length_huffs["cr"]),
"cb": huffman_data_decode(payloads[11], length_huffs["cb"]),
}
min_shape = payloads[12].numbers
max_shape = payloads[13].numbers
utils.debug_msg("Unloaded all of the data")
# ====
rles = utils.dict_map(value_comps,
lambda k, v: [RunLength(value=t[1], length=t[0]) for t in list(zip(length_comps[k], v))])
length = wavelet_decoded_length(min_shape, max_shape)
data = utils.dict_map(rles, lambda _, v: decode_run_length(v, length))
shapes = wavelet_decoded_subbands_shapes(min_shape, max_shape)
channels = utils.dict_map(data, lambda _, v: wavelet_decode_pull_subbands(v, shapes))
return model.CompressedImage.from_dict(channels)
def huffman_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman in payload
"""
leaves = huff.encode_table()
return hic.PayloadStringP(hic.TupP, [hic.TupP(t[0], t[1]) for t in leaves])
def huffman_decode(data: hic.PayloadStringP) -> huffman.HuffmanTree:
"""
Decode huffman from payload
"""
number_string = data.payloads
leaves = [p.numbers for p in number_string]
return huffman.HuffmanTree.construct_from_coding(leaves)
def huffman_data_encode(huff: huffman.HuffmanTree) -> hic.Payload:
"""
Encode huffman data into payload
"""
data = huff.encode_data()
return hic.BitStringP(data)
def huffman_data_decode(data: hic.BitStringP, huffman: huffman.HuffmanTree) -> list:
"""
Decode huffman data from payload with huffman tree
"""
return huffman.decode_data(data.payload)
def jpeg_encode(compressed: model.CompressedImage) -> hic.HicImage:
"""
Generally follow JPEG encoding. Since for the wavelet work I don't have a standard Huffman tree to work with
I might as well be consistent between the two implementations and just encode the entire array with custom
Huffman trees. To attempt to be honest with the implementation though, I'll still treat the DC components
separately by doing the differences and again applying a custom Huffman. A main feature of DCT on each block is the
meaning of the DC component.
For RL it's also easier implementation-wise to split up the length from the value and not try to optimize and weave
them together. Yes, the encoding will suffer bloat, but we are trying to highlight the transforms anyway.
"""
utils.debug_msg("Starting JPEG encoding")
dc_comps = utils.dict_map(compressed.as_dict,
lambda _, v: differential_coding(transform.split_matrix(v, settings.JPEG_BLOCK_SIZE)))
utils.debug_msg("Determined differences DC components")
def ac_comp_fun(k, v):
utils.debug_msg("Determining AC components for: " + k)
splits = transform.split_matrix(v, settings.JPEG_BLOCK | offset = utils.size(shapes[0])
subbands = [transform.izigzag(np.array(data[:offset]), shapes[0])]
for i in range(len(shapes)):
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
subbands.append(transform.izigzag(np.array(data[offset:offset + utils.size(shapes[i])]), shapes[i]))
offset += utils.size(shapes[i])
return subbands | identifier_body |
mailbox.rs | send,
namespace_filter,
)?;
link_async2_if_match(
linker,
"lunatic::message",
"prepare_receive",
FuncType::new([ValType::I32, ValType::I32], [ValType::I32]),
prepare_receive,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"receive",
FuncType::new([ValType::I32, ValType::I32], []),
receive,
namespace_filter,
)?;
Ok(())
}
//% lunatic::message
//%
//% A lunatic message consists of 2 parts:
//% * A buffer of raw data
//% * An array of resource IDs
//%
//% If resources are sent between processes, their ID changes. A resource ID may, for example,
//% already be taken in the receiving process. So we need a way to communicate the new ID on the
//% receiving end.
//%
//% When the `create()` function is called an empty message is allocated and both parts can be
//% modified before it's sent to another process. If a new resource is added to the message, the
//% index inside the array is returned. This information can now be serialized inside the raw
//% data buffer in some way. E.g. You are serializing a structure like this:
//%
//% struct A {
//% a: String,
//% b: Process,
//% c: i32,
//% d: TcpStream
//% }
//%
//% Into something like this:
//%
//% ["Some string" | [resource 0] | i32 value | [resource 1] ]
//%
//% [resource 0] & [resource 1] are just encoded as 0 and 1 u64 values, representing their order
//% in the resource array.
//%
//% It's common to use some serialization library that will encode a mixture of raw data and
//% resource indexes into the data buffer.
//%
//% On the receiving side, first the `prepare_receive()` function must be called to receive info
//% on how big the buffer and resource arrays are, so that enough space can be allocated inside
//% the guest.
//%
//% The `receive()` function will do 2 things:
//% * Write the buffer of raw data to the specified location
//% * Give all resources to the new process (with new IDs) and write the IDs to the specified
//% location in the same order they were added.
//% Now the information from the buffer (with resource indexes) can be used to deserialize the
//% received message into the same structure.
//%
//% This can be a bit confusing, because resources are just IDs (u64 values) themselves. But we
//% still need to serialize them into different u64 values. Resources are inherently bound to a
//% process and you can't access another resource just by guessing an ID from another process.
//% The process of sending them around needs to be explicit.
//%
//% This API was designed around the idea that most guest languages will use some serialization
//% library and turning resources into indexes is a way of serializing. The same is true for
//% deserializing them on the receiving side, when an index needs to be turned into an actual
//% resource ID.
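//%
//% A hedged sketch of the send side from a guest's point of view (the serialization
//% helper `encode` and the import glue are assumed, not part of this module):
//%
//%   create();
//%   let idx = add_process(worker_pid);      // first resource added, so idx == 0
//%   let buf = encode(&("job", idx));        // serialize the index, not the raw ID
//%   set_buffer(buf.as_ptr() as u32, buf.len() as u32);
//%   send(worker_pid);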
//% lunatic::message::create()
//%
//% Creates a new message. This message is intended to be modified by other functions in this
//% namespace. Once `lunatic::message::send` is called it will be sent to another process.
fn create(mut caller: Caller<ProcessState>) {
caller.data_mut().message = Some(Message::default());
}
//% lunatic::message::set_buffer(
//% data_ptr: i32,
//% data_len: i32,
//% )
//%
//% Sets the data for the next message.
//%
//% Traps:
//% * If **data_ptr + data_len** is outside the memory.
//% * If it's called before the next message is created.
fn set_buffer(mut caller: Caller<ProcessState>, data_ptr: u32, data_len: u32) -> Result<(), Trap> {
let mut buffer = vec![0; data_len as usize];
let memory = get_memory(&mut caller)?;
memory
.read(&caller, data_ptr as usize, buffer.as_mut_slice())
.or_trap("lunatic::message::set_buffer")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::set_buffer")?;
match message {
Message::Data(data) => data.set_buffer(buffer),
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(())
}
//% lunatic::message::add_process(process_id: u64) -> u64
//%
//% Adds a process resource to the next message and returns the location in the array the process
//% was added to. This will remove the process handle from the current process' resources.
//%
//% Traps:
//% * If process ID doesn't exist
//% * If it's called before the next message is created.
fn add_process(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u64, Trap> {
let process = caller
.data_mut()
.resources
.processes
.remove(process_id)
.or_trap("lunatic::message::add_process")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_process")?;
let pid = match message {
Message::Data(data) => data.add_process(process) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(pid)
}
//% lunatic::message::add_tcp_stream(stream_id: u64) -> u64
//%
//% Adds a TCP stream resource to the next message and returns the location in the array the TCP
//% stream was added to. This will remove the TCP stream from the current process' resources.
//%
//% Traps:
//% * If TCP stream ID doesn't exist
//% * If it's called before the next message is created.
fn add_tcp_stream(mut caller: Caller<ProcessState>, stream_id: u64) -> Result<u64, Trap> {
let stream = caller
.data_mut()
.resources
.tcp_streams
.remove(stream_id)
.or_trap("lunatic::message::add_tcp_stream")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_tcp_stream")?;
let stream_id = match message {
Message::Data(data) => data.add_tcp_stream(stream) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(stream_id)
}
//% lunatic::message::send(
//% process_id: i64,
//% ) -> i32
//%
//% Returns:
//% * 0 on success
//% * 1 on error - Process can't receive messages (finished).
//%
//% Sends the message to a process.
//%
//% Traps:
//% * If the process ID doesn't exist.
//% * If it's called before creating the next message.
fn send(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u32, Trap> |
//% lunatic::message::prepare_receive(i32_data_size_ptr: i32, i32_res_size_ptr: i32) -> i32
//%
//% Returns:
//% * 0 if it's a regular message.
//% * 1 if it's a signal turned into a message.
//%
//% For regular messages both parameters are used.
//% * **i32_data_size_ptr** - Location to write the message buffer size to.
//% * **i32_res_size_ptr** - Location to write the number of resources to.
//%
//% This function should be called before `lunatic::message::receive` to let the guest know how
//% much memory space needs to be reserved for the next message. The data size is in **bytes**,
//% the resources size is the number of resources and each resource is a u64 value. Because of
//% this the guest needs to reserve `8 * resource size` bytes for the resource buffer (a u64 is 8 bytes).
//%
//% Traps:
//% * If **size_ptr** is outside | {
let message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::send")?;
let process = caller
.data()
.resources
.processes
.get(process_id)
.or_trap("lunatic::message::send")?;
let result = match process.send_message(message) {
Ok(()) => 0,
Err(_error) => 1,
};
Ok(result)
} | identifier_body |
mailbox.rs | send,
namespace_filter,
)?;
link_async2_if_match(
linker,
"lunatic::message",
"prepare_receive",
FuncType::new([ValType::I32, ValType::I32], [ValType::I32]),
prepare_receive,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"receive",
FuncType::new([ValType::I32, ValType::I32], []),
receive,
namespace_filter,
)?;
Ok(())
}
//% lunatic::message
//%
//% A lunatic message consists of 2 parts:
//% * A buffer of raw data
//% * An array of resource IDs
//%
//% If resources are sent between processes, their ID changes. A resource ID may, for example,
//% already be taken in the receiving process. So we need a way to communicate the new ID on the
//% receiving end.
//%
//% When the `create()` function is called an empty message is allocated and both parts can be
//% modified before it's sent to another process. If a new resource is added to the message, the
//% index inside the array is returned. This information can now be serialized inside the raw
//% data buffer in some way. E.g. You are serializing a structure like this:
//%
//% struct A {
//% a: String,
//% b: Process,
//% c: i32,
//% d: TcpStream
//% }
//%
//% Into something like this:
//%
//% ["Some string" | [resource 0] | i32 value | [resource 1] ]
//%
//% [resource 0] & [resource 1] are just encoded as 0 and 1 u64 values, representing their order
//% in the resource array.
//%
//% It's common to use some serialization library that will encode a mixture of raw data and
//% resource indexes into the data buffer.
//%
//% On the receiving side, first the `prepare_receive()` function must be called to receive info
//% on how big the buffer and resource arrays are, so that enough space can be allocated inside
//% the guest.
//%
//% The `receive()` function will do 2 things:
//% * Write the buffer of raw data to the specified location
//% * Give all resources to the new process (with new IDs) and write the IDs to the specified
//% location in the same order they were added.
//% Now the information from the buffer (with resource indexes) can be used to deserialize the | //% The process of sending them around needs to be explicit.
//%
//% This API was designed around the idea that most guest languages will use some serialization
//% library and turning resources into indexes is a way of serializing. The same is true for
//% deserializing them on the receiving side, when an index needs to be turned into an actual
//% resource ID.
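//%
//% A hedged sketch of the receive side (guest-side glue assumed):
//%
//%   prepare_receive(&mut data_size as *mut _ as i32, &mut res_count as *mut _ as i32);
//%   let mut data = vec![0u8; data_size as usize];
//%   let mut res = vec![0u64; res_count as usize];
//%   receive(data.as_mut_ptr() as i32, res.as_mut_ptr() as i32);
//%   // any index i deserialized out of `data` resolves to the new ID `res[i]`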
//% lunatic::message::create()
//%
//% Creates a new message. This message is intended to be modified by other functions in this
//% namespace. Once `lunatic::message::send` is called it will be sent to another process.
fn create(mut caller: Caller<ProcessState>) {
caller.data_mut().message = Some(Message::default());
}
//% lunatic::message::set_buffer(
//% data_ptr: i32,
//% data_len: i32,
//% )
//%
//% Sets the data for the next message.
//%
//% Traps:
//% * If **data_ptr + data_len** is outside the memory.
//% * If it's called before the next message is created.
fn set_buffer(mut caller: Caller<ProcessState>, data_ptr: u32, data_len: u32) -> Result<(), Trap> {
let mut buffer = vec![0; data_len as usize];
let memory = get_memory(&mut caller)?;
memory
.read(&caller, data_ptr as usize, buffer.as_mut_slice())
.or_trap("lunatic::message::set_buffer")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::set_buffer")?;
match message {
Message::Data(data) => data.set_buffer(buffer),
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(())
}
//% lunatic::message::add_process(process_id: u64) -> u64
//%
//% Adds a process resource to the next message and returns the location in the array the process
//% was added to. This will remove the process handle from the current process' resources.
//%
//% Traps:
//% * If process ID doesn't exist
//% * If it's called before the next message is created.
fn add_process(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u64, Trap> {
let process = caller
.data_mut()
.resources
.processes
.remove(process_id)
.or_trap("lunatic::message::add_process")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_process")?;
let pid = match message {
Message::Data(data) => data.add_process(process) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(pid)
}
//% lunatic::message::add_tcp_stream(stream_id: u64) -> u64
//%
//% Adds a TCP stream resource to the next message and returns the location in the array the TCP
//% stream was added to. This will remove the TCP stream from the current process' resources.
//%
//% Traps:
//% * If TCP stream ID doesn't exist
//% * If it's called before the next message is created.
fn add_tcp_stream(mut caller: Caller<ProcessState>, stream_id: u64) -> Result<u64, Trap> {
let stream = caller
.data_mut()
.resources
.tcp_streams
.remove(stream_id)
.or_trap("lunatic::message::add_tcp_stream")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_tcp_stream")?;
let stream_id = match message {
Message::Data(data) => data.add_tcp_stream(stream) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(stream_id)
}
//% lunatic::message::send(
//% process_id: i64,
//% ) -> i32
//%
//% Returns:
//% * 0 on success
//% * 1 on error - Process can't receive messages (finished).
//%
//% Sends the message to a process.
//%
//% Traps:
//% * If the process ID doesn't exist.
//% * If it's called before creating the next message.
fn send(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u32, Trap> {
let message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::send")?;
let process = caller
.data()
.resources
.processes
.get(process_id)
.or_trap("lunatic::message::send")?;
let result = match process.send_message(message) {
Ok(()) => 0,
Err(_error) => 1,
};
Ok(result)
}
//% lunatic::message::prepare_receive(i32_data_size_ptr: i32, i32_res_size_ptr: i32) -> i32
//%
//% Returns:
//% * 0 if it's a regular message.
//% * 1 if it's a signal turned into a message.
//%
//% For regular messages both parameters are used.
//% * **i32_data_size_ptr** - Location to write the message buffer size to.
//% * **i32_res_size_ptr** - Location to write the number of resources to.
//%
//% This function should be called before `lunatic::message::receive` to let the guest know how
//% much memory space needs to be reserved for the next message. The data size is in **bytes**,
//% the resources size is the number of resources and each resource is a u64 value. Because of
//% this the guest needs to reserve `8 * resource size` bytes for the resource buffer (a u64 is 8 bytes).
//%
//% Traps:
//% * If **size_ptr** is outside the memory.
| //% received message into the same structure.
//%
//% This can be a bit confusing, because resources are just IDs (u64 values) themself. But we
//% still need to serialize them into different u64 values. Resources are inherently bound to a
//% process and you can't access another resource just by guessing an ID from another process. | random_line_split |
mailbox.rs | _filter,
)?;
link_async2_if_match(
linker,
"lunatic::message",
"prepare_receive",
FuncType::new([ValType::I32, ValType::I32], [ValType::I32]),
prepare_receive,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"receive",
FuncType::new([ValType::I32, ValType::I32], []),
receive,
namespace_filter,
)?;
Ok(())
}
//% lunatic::message
//%
//% A lunatic message consists of 2 parts:
//% * A buffer of raw data
//% * An array of resource IDs
//%
//% If resources are sent between processes, their ID changes. A resource ID may, for example,
//% already be taken in the receiving process. So we need a way to communicate the new ID on the
//% receiving end.
//%
//% When the `create()` function is called an empty message is allocated and both parts can be
//% modified before it's sent to another process. If a new resource is added to the message, the
//% index inside the array is returned. This information can now be serialized inside the raw
//% data buffer in some way. E.g. You are serializing a structure like this:
//%
//% struct A {
//% a: String,
//% b: Process,
//% c: i32,
//% d: TcpStream
//% }
//%
//% Into something like this:
//%
//% ["Some string" | [resource 0] | i32 value | [resource 1] ]
//%
//% [resource 0] & [resource 1] are just encoded as 0 and 1 u64 values, representing their order
//% in the resource array.
//%
//% It's common to use some serialization library that will encode a mixture of raw data and
//% resource indexes into the data buffer.
//%
//% On the receiving side, first the `prepare_receive()` function must be called to receive info
//% on how big the buffer and resource arrays are, so that enough space can be allocated inside
//% the guest.
//%
//% The `receive()` function will do 2 things:
//% * Write the buffer of raw data to the specified location
//% * Give all resources to the new process (with new IDs) and write the IDs to the specified
//% location in the same order they were added.
//% Now the information from the buffer (with resource indexes) can be used to deserialize the
//% received message into the same structure.
//%
//% This can be a bit confusing, because resources are just IDs (u64 values) themselves. But we
//% still need to serialize them into different u64 values. Resources are inherently bound to a
//% process and you can't access another resource just by guessing an ID from another process.
//% The process of sending them around needs to be explicit.
//%
//% This API was designed around the idea that most guest languages will use some serialization
//% library and turning resources into indexes is a way of serializing. The same is true for
//% deserializing them on the receiving side, when an index needs to be turned into an actual
//% resource ID.
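//%
//% Worked example (IDs assumed): sending `struct A` from above adds `b` as resource 0
//% and `d` as resource 1, so the buffer stores 0 and 1. If the receiver's resource
//% array comes back as [17, 42], index 0 resolves to process ID 17 and index 1 to
//% TCP stream ID 42 in the receiving process.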
//% lunatic::message::create()
//%
//% Creates a new message. This message is intended to be modified by other functions in this
//% namespace. Once `lunatic::message::send` is called it will be sent to another process.
fn create(mut caller: Caller<ProcessState>) {
caller.data_mut().message = Some(Message::default());
}
//% lunatic::message::set_buffer(
//% data_ptr: i32,
//% data_len: i32,
//% )
//%
//% Sets the data for the next message.
//%
//% Traps:
//% * If **data_ptr + data_len** is outside the memory.
//% * If it's called before the next message is created.
fn set_buffer(mut caller: Caller<ProcessState>, data_ptr: u32, data_len: u32) -> Result<(), Trap> {
let mut buffer = vec![0; data_len as usize];
let memory = get_memory(&mut caller)?;
memory
.read(&caller, data_ptr as usize, buffer.as_mut_slice())
.or_trap("lunatic::message::set_buffer")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::set_buffer")?;
match message {
Message::Data(data) => data.set_buffer(buffer),
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(())
}
//% lunatic::message::add_process(process_id: u64) -> u64
//%
//% Adds a process resource to the next message and returns the location in the array the process
//% was added to. This will remove the process handle from the current process' resources.
//%
//% Traps:
//% * If process ID doesn't exist
//% * If it's called before the next message is created.
fn add_process(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u64, Trap> {
let process = caller
.data_mut()
.resources
.processes
.remove(process_id)
.or_trap("lunatic::message::add_process")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_process")?;
let pid = match message {
Message::Data(data) => data.add_process(process) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(pid)
}
//% lunatic::message::add_tcp_stream(stream_id: u64) -> u64
//%
//% Adds a TCP stream resource to the next message and returns the location in the array the TCP
//% stream was added to. This will remove the TCP stream from the current process' resources.
//%
//% Traps:
//% * If TCP stream ID doesn't exist
//% * If it's called before the next message is created.
fn add_tcp_stream(mut caller: Caller<ProcessState>, stream_id: u64) -> Result<u64, Trap> {
let stream = caller
.data_mut()
.resources
.tcp_streams
.remove(stream_id)
.or_trap("lunatic::message::add_tcp_stream")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_tcp_stream")?;
let stream_id = match message {
Message::Data(data) => data.add_tcp_stream(stream) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(stream_id)
}
//% lunatic::message::send(
//% process_id: i64,
//% ) -> i32
//%
//% Returns:
//% * 0 on success
//% * 1 on error - Process can't receive messages (finished).
//%
//% Sends the message to a process.
//%
//% Traps:
//% * If the process ID doesn't exist.
//% * If it's called before creating the next message.
fn send(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u32, Trap> {
let message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::send")?;
let process = caller
.data()
.resources
.processes
.get(process_id)
.or_trap("lunatic::message::send")?;
let result = match process.send_message(message) {
Ok(()) => 0,
Err(_error) => 1,
};
Ok(result)
}
//% lunatic::message::prepare_receive(i32_data_size_ptr: i32, i32_res_size_ptr: i32) -> i32
//%
//% Returns:
//% * 0 if it's a regular message.
//% * 1 if it's a signal turned into a message.
//%
//% For regular messages both parameters are used.
//% * **i32_data_size_ptr** - Location to write the message buffer size to.
//% * **i32_res_size_ptr** - Location to write the number of resources to.
//%
//% This function should be called before `lunatic::message::receive` to let the guest know how
//% much memory space needs to be reserved for the next message. The data size is in **bytes**,
//% the resources size is the number of resources and each resource is a u64 value. Because of
//% this the guest needs to reserve `8 * resource size` bytes for the resource buffer (a u64 is 8 bytes).
//%
//% Traps:
//% * If **size_ptr** is outside the memory.
fn | prepare_receive | identifier_name |
|
string.rs | _sub(self.opener.len() + self.line_end.len() + 1)?
+ 1,
)
}
/// Like max_chars_with_indent but the indentation is not subtracted.
/// This allows fitting more graphemes from the string on a line when
/// SnippetState::Overflow.
fn max_chars_without_indent(&self) -> Option<usize> {
Some(self.config.max_width().checked_sub(self.line_end.len())?)
}
}
pub fn rewrite_string<'a>(orig: &str, fmt: &StringFormat<'a>) -> Option<String> {
let max_chars_with_indent = fmt.max_chars_with_indent()?;
let max_chars_without_indent = fmt.max_chars_without_indent()?;
let indent = fmt.shape.indent.to_string_with_newline(fmt.config);
// Strip line breaks.
// With this regex applied, all remaining whitespaces are significant
let strip_line_breaks_re = Regex::new(r"([^\\](\\\\)*)\\[\n\r][[:space:]]*").unwrap();
let stripped_str = strip_line_breaks_re.replace_all(orig, "$1");
let graphemes = UnicodeSegmentation::graphemes(&*stripped_str, false).collect::<Vec<&str>>();
// `cur_start` is the position in `orig` of the start of the current line.
let mut cur_start = 0;
let mut result = String::with_capacity(
stripped_str
.len()
.checked_next_power_of_two()
.unwrap_or(usize::max_value()),
);
result.push_str(fmt.opener);
// Snip a line at a time from `stripped_str` until it is used up. Push the snippet
// onto result.
let mut cur_max_chars = max_chars_with_indent;
loop {
// All the input starting at cur_start fits on the current line
if graphemes.len() - cur_start <= cur_max_chars {
result.push_str(&graphemes[cur_start..].join(""));
break;
}
// The input starting at cur_start needs to be broken
match break_string(cur_max_chars, fmt.trim_end, &graphemes[cur_start..]) {
SnippetState::LineEnd(line, len) => {
result.push_str(&line);
result.push_str(fmt.line_end);
result.push_str(&indent);
result.push_str(fmt.line_start);
cur_max_chars = max_chars_with_indent;
cur_start += len;
}
SnippetState::Overflow(line, len) => {
result.push_str(&line);
cur_max_chars = max_chars_without_indent;
cur_start += len;
}
SnippetState::EndOfInput(line) => {
result.push_str(&line);
break;
}
}
}
result.push_str(fmt.closer);
wrap_str(result, fmt.config.max_width(), fmt.shape)
}
/// Result of breaking a string so it fits in a line and the state it ended in.
/// The state informs about what to do with the snippet and how to continue the breaking process.
#[derive(Debug, PartialEq)]
enum SnippetState {
/// The input could not be broken and so rewriting the string is finished.
EndOfInput(String),
/// The input could be broken and the returned snippet should be ended with a
/// `[StringFormat::line_end]`. The next snippet needs to be indented.
LineEnd(String, usize),
/// The input could be broken but the returned snippet should not be ended with a
/// `[StringFormat::line_end]` because the whitespace is significant. Therefore, the next
/// snippet should not be indented.
Overflow(String, usize),
}
/// Break the input string at a boundary character around the offset `max_chars`. A boundary
/// character is either a punctuation or a whitespace.
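/// For example, mirroring the tests below: with `max_chars = 20` and `trim_end = true`,
/// "Placerat felis. Mauris porta ante sagittis purus." breaks as
/// `SnippetState::LineEnd("Placerat felis.", 16)`; the trailing space is trimmed from
/// the snippet but still counted in the 16 graphemes consumed.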
fn break_string(max_chars: usize, trim_end: bool, input: &[&str]) -> SnippetState {
let break_at = |index /* grapheme at index is included */| {
// Take in any whitespaces to the left/right of `input[index]` and
// check if there is a line feed, in which case whitespaces needs to be kept.
let mut index_minus_ws = index;
for (i, grapheme) in input[0..=index].iter().enumerate().rev() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(input[0..=i].join("").to_string(), i + 1);
} else if !is_whitespace(grapheme) {
index_minus_ws = i;
break;
}
}
let mut index_plus_ws = index;
for (i, grapheme) in input[index + 1..].iter().enumerate() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(
input[0..=index + 1 + i].join("").to_string(),
index + 2 + i,
);
} else if !is_whitespace(grapheme) {
index_plus_ws = index + i;
break;
}
}
if trim_end {
SnippetState::LineEnd(
input[0..=index_minus_ws].join("").to_string(),
index_plus_ws + 1,
)
} else {
SnippetState::LineEnd(
input[0..=index_plus_ws].join("").to_string(),
index_plus_ws + 1,
)
}
};
// Find the position in input for breaking the string
match input[0..max_chars]
.iter()
.rposition(|grapheme| is_whitespace(grapheme))
{
// Found a whitespace and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// No whitespace found, try looking for a punctuation instead
_ => match input[0..max_chars]
.iter()
.rposition(|grapheme| is_punctuation(grapheme))
{
// Found a punctuation and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// Either no boundary character was found to the left of `input[max_chars]`, or the line
// got too small. We try searching for a boundary character to the right.
_ => match input[max_chars..]
.iter()
.position(|grapheme| is_whitespace(grapheme) || is_punctuation(grapheme))
{
// A boundary was found after the line limit
Some(index) => break_at(max_chars + index),
// No boundary to the right, the input cannot be broken
None => SnippetState::EndOfInput(input.join("").to_string()),
},
},
}
}
fn is_line_feed(grapheme: &str) -> bool {
grapheme.as_bytes()[0] == b'\n'
}
fn is_whitespace(grapheme: &str) -> bool {
grapheme.chars().all(|c| c.is_whitespace())
}
fn is_punctuation(grapheme: &str) -> bool {
match grapheme.as_bytes()[0] {
b':' | b',' | b';' | b'.' => true,
_ => false,
}
}
#[cfg(test)]
mod test {
use super::{break_string, rewrite_string, SnippetState, StringFormat};
use shape::{Indent, Shape};
use unicode_segmentation::UnicodeSegmentation;
#[test]
fn issue343() {
let config = Default::default();
let fmt = StringFormat::new(Shape::legacy(2, Indent::empty()), &config);
rewrite_string("eq_", &fmt);
}
#[test]
fn should_break_on_whitespace() {
let string = "Placerat felis. Mauris porta ante sagittis purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat felis. ".to_string(), 16)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Placerat felis.".to_string(), 16)
);
}
#[test]
fn should_break_on_punctuation() {
let string = "Placerat_felis._Mauris_porta_ante_sagittis_purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat_felis.".to_string(), 15)
);
}
#[test]
fn should_break_forward() {
let string = "Venenatis_tellus_vel_tellus. Aliquam aliquam dolor at justo.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus. ".to_string(), 29)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus.".to_string(), 29)
);
}
#[test]
fn | nothing_to_break | identifier_name |
|
string.rs | .opener);
// Snip a line at a time from `stripped_str` until it is used up. Push the snippet
// onto result.
let mut cur_max_chars = max_chars_with_indent;
loop {
// All the input starting at cur_start fits on the current line
if graphemes.len() - cur_start <= cur_max_chars {
result.push_str(&graphemes[cur_start..].join(""));
break;
}
// The input starting at cur_start needs to be broken
match break_string(cur_max_chars, fmt.trim_end, &graphemes[cur_start..]) {
SnippetState::LineEnd(line, len) => {
result.push_str(&line);
result.push_str(fmt.line_end);
result.push_str(&indent);
result.push_str(fmt.line_start);
cur_max_chars = max_chars_with_indent;
cur_start += len;
}
SnippetState::Overflow(line, len) => {
result.push_str(&line);
cur_max_chars = max_chars_without_indent;
cur_start += len;
}
SnippetState::EndOfInput(line) => {
result.push_str(&line);
break;
}
}
}
result.push_str(fmt.closer);
wrap_str(result, fmt.config.max_width(), fmt.shape)
}
/// Result of breaking a string so it fits in a line and the state it ended in.
/// The state informs about what to do with the snippet and how to continue the breaking process.
#[derive(Debug, PartialEq)]
enum SnippetState {
/// The input could not be broken and so rewriting the string is finished.
EndOfInput(String),
/// The input could be broken and the returned snippet should be ended with a
/// `[StringFormat::line_end]`. The next snippet needs to be indented.
LineEnd(String, usize),
/// The input could be broken but the returned snippet should not be ended with a
/// `[StringFormat::line_end]` because the whitespace is significant. Therefore, the next
/// snippet should not be indented.
Overflow(String, usize),
}
/// Break the input string at a boundary character around the offset `max_chars`. A boundary
/// character is either a punctuation or a whitespace.
fn break_string(max_chars: usize, trim_end: bool, input: &[&str]) -> SnippetState {
let break_at = |index /* grapheme at index is included */| {
// Take in any whitespaces to the left/right of `input[index]` and
// check if there is a line feed, in which case whitespaces needs to be kept.
let mut index_minus_ws = index;
for (i, grapheme) in input[0..=index].iter().enumerate().rev() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(input[0..=i].join("").to_string(), i + 1);
} else if !is_whitespace(grapheme) {
index_minus_ws = i;
break;
}
}
let mut index_plus_ws = index;
for (i, grapheme) in input[index + 1..].iter().enumerate() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(
input[0..=index + 1 + i].join("").to_string(),
index + 2 + i,
);
} else if !is_whitespace(grapheme) {
index_plus_ws = index + i;
break;
}
}
if trim_end {
SnippetState::LineEnd(
input[0..=index_minus_ws].join("").to_string(),
index_plus_ws + 1,
)
} else {
SnippetState::LineEnd(
input[0..=index_plus_ws].join("").to_string(),
index_plus_ws + 1,
)
}
};
// Find the position in input for breaking the string
match input[0..max_chars]
.iter()
.rposition(|grapheme| is_whitespace(grapheme))
{
// Found a whitespace and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// No whitespace found, try looking for a punctuation instead
_ => match input[0..max_chars]
.iter()
.rposition(|grapheme| is_punctuation(grapheme))
{
// Found a punctuation and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// Either no boundary character was found to the left of `input[max_chars]`, or the line
// got too small. We try searching for a boundary character to the right.
_ => match input[max_chars..]
.iter()
.position(|grapheme| is_whitespace(grapheme) || is_punctuation(grapheme))
{
// A boundary was found after the line limit
Some(index) => break_at(max_chars + index),
// No boundary to the right, the input cannot be broken
None => SnippetState::EndOfInput(input.join("").to_string()),
},
},
}
}
fn is_line_feed(grapheme: &str) -> bool {
grapheme.as_bytes()[0] == b'\n'
}
fn is_whitespace(grapheme: &str) -> bool {
grapheme.chars().all(|c| c.is_whitespace())
}
fn is_punctuation(grapheme: &str) -> bool {
match grapheme.as_bytes()[0] {
b':' | b',' | b';' | b'.' => true,
_ => false,
}
}
#[cfg(test)]
mod test {
use super::{break_string, rewrite_string, SnippetState, StringFormat};
use shape::{Indent, Shape};
use unicode_segmentation::UnicodeSegmentation;
#[test]
fn issue343() {
let config = Default::default();
let fmt = StringFormat::new(Shape::legacy(2, Indent::empty()), &config);
rewrite_string("eq_", &fmt);
}
#[test]
fn should_break_on_whitespace() {
let string = "Placerat felis. Mauris porta ante sagittis purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat felis. ".to_string(), 16)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Placerat felis.".to_string(), 16)
);
}
#[test]
fn should_break_on_punctuation() {
let string = "Placerat_felis._Mauris_porta_ante_sagittis_purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat_felis.".to_string(), 15)
);
}
#[test]
fn should_break_forward() {
let string = "Venenatis_tellus_vel_tellus. Aliquam aliquam dolor at justo.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus. ".to_string(), 29)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus.".to_string(), 29)
);
}
#[test]
fn nothing_to_break() {
let string = "Venenatis_tellus_vel_tellus";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::EndOfInput("Venenatis_tellus_vel_tellus".to_string())
);
}
#[test]
fn significant_whitespaces() | {
let string = "Neque in sem. \n Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(15, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
assert_eq!(
break_string(25, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
// if `StringFormat::line_end` is true, then the line feed does not matter anymore
assert_eq!(
break_string(15, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
);
assert_eq!(
break_string(25, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
); | identifier_body |
|
string.rs | a> {
StringFormat {
opener: "\"",
closer: "\"",
line_start: " ",
line_end: "\\",
shape,
trim_end: false,
config,
}
}
/// Returns the maximum number of graphemes that can fit on a line while taking the
/// indentation into account.
///
/// If we cannot put at least a single character per line, the rewrite won't succeed.
fn max_chars_with_indent(&self) -> Option<usize> {
Some(
self.shape
.width | /// Like max_chars_with_indent but the indentation is not subtracted.
/// This allows fitting more graphemes from the string on a line when
/// SnippetState::Overflow.
fn max_chars_without_indent(&self) -> Option<usize> {
Some(self.config.max_width().checked_sub(self.line_end.len())?)
}
}
pub fn rewrite_string<'a>(orig: &str, fmt: &StringFormat<'a>) -> Option<String> {
let max_chars_with_indent = fmt.max_chars_with_indent()?;
let max_chars_without_indent = fmt.max_chars_without_indent()?;
let indent = fmt.shape.indent.to_string_with_newline(fmt.config);
// Strip line breaks.
// With this regex applied, all remaining whitespaces are significant
let strip_line_breaks_re = Regex::new(r"([^\\](\\\\)*)\\[\n\r][[:space:]]*").unwrap();
let stripped_str = strip_line_breaks_re.replace_all(orig, "$1");
let graphemes = UnicodeSegmentation::graphemes(&*stripped_str, false).collect::<Vec<&str>>();
// `cur_start` is the position in `orig` of the start of the current line.
let mut cur_start = 0;
let mut result = String::with_capacity(
stripped_str
.len()
.checked_next_power_of_two()
.unwrap_or(usize::max_value()),
);
result.push_str(fmt.opener);
// Snip a line at a time from `stripped_str` until it is used up. Push the snippet
// onto result.
let mut cur_max_chars = max_chars_with_indent;
loop {
// All the input starting at cur_start fits on the current line
if graphemes.len() - cur_start <= cur_max_chars {
result.push_str(&graphemes[cur_start..].join(""));
break;
}
// The input starting at cur_start needs to be broken
match break_string(cur_max_chars, fmt.trim_end, &graphemes[cur_start..]) {
SnippetState::LineEnd(line, len) => {
result.push_str(&line);
result.push_str(fmt.line_end);
result.push_str(&indent);
result.push_str(fmt.line_start);
cur_max_chars = max_chars_with_indent;
cur_start += len;
}
SnippetState::Overflow(line, len) => {
result.push_str(&line);
cur_max_chars = max_chars_without_indent;
cur_start += len;
}
SnippetState::EndOfInput(line) => {
result.push_str(&line);
break;
}
}
}
result.push_str(fmt.closer);
wrap_str(result, fmt.config.max_width(), fmt.shape)
}
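// Illustrative sketch (not from the original source): with the StringFormat
// produced by `new` above ("\"" opener/closer, "\\" line_end, " " line_start),
// a literal wider than the shape is rewritten along the lines of:
//
//     "this is a very long \
//      string literal"
//
// i.e. each continuation line is placed at the shape's indent plus line_start.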
/// Result of breaking a string so it fits in a line and the state it ended in.
/// The state informs about what to do with the snippet and how to continue the breaking process.
#[derive(Debug, PartialEq)]
enum SnippetState {
/// The input could not be broken and so rewriting the string is finished.
EndOfInput(String),
/// The input could be broken and the returned snippet should be ended with a
/// `[StringFormat::line_end]`. The next snippet needs to be indented.
LineEnd(String, usize),
/// The input could be broken but the returned snippet should not be ended with a
/// `[StringFormat::line_end]` because the whitespace is significant. Therefore, the next
/// snippet should not be indented.
Overflow(String, usize),
}
/// Break the input string at a boundary character around the offset `max_chars`. A boundary
/// character is either a punctuation or a whitespace.
fn break_string(max_chars: usize, trim_end: bool, input: &[&str]) -> SnippetState {
let break_at = |index /* grapheme at index is included */| {
// Take in any whitespace to the left/right of `input[index]` and
// check if there is a line feed, in which case the whitespace needs to be kept.
let mut index_minus_ws = index;
for (i, grapheme) in input[0..=index].iter().enumerate().rev() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(input[0..=i].join("").to_string(), i + 1);
} else if !is_whitespace(grapheme) {
index_minus_ws = i;
break;
}
}
let mut index_plus_ws = index;
for (i, grapheme) in input[index + 1..].iter().enumerate() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(
input[0..=index + 1 + i].join("").to_string(),
index + 2 + i,
);
} else if !is_whitespace(grapheme) {
index_plus_ws = index + i;
break;
}
}
if trim_end {
SnippetState::LineEnd(
input[0..=index_minus_ws].join("").to_string(),
index_plus_ws + 1,
)
} else {
SnippetState::LineEnd(
input[0..=index_plus_ws].join("").to_string(),
index_plus_ws + 1,
)
}
};
// Find the position in input for breaking the string
match input[0..max_chars]
.iter()
.rposition(|grapheme| is_whitespace(grapheme))
{
// Found a whitespace and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// No whitespace found, try looking for a punctuation instead
_ => match input[0..max_chars]
.iter()
.rposition(|grapheme| is_punctuation(grapheme))
{
// Found a punctuation and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// Either no boundary character was found to the left of `input[max_chars]`, or the line
// got too small. We try searching for a boundary character to the right.
_ => match input[max_chars..]
.iter()
.position(|grapheme| is_whitespace(grapheme) || is_punctuation(grapheme))
{
// A boundary was found after the line limit
Some(index) => break_at(max_chars + index),
// No boundary to the right, the input cannot be broken
None => SnippetState::EndOfInput(input.join("").to_string()),
},
},
}
}
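// Illustrative worked example (not part of the original source): for the
// graphemes of "hello world" with max_chars = 5, neither branch finds a
// boundary inside input[0..5], so the forward search finds the space at
// index 5; with trim_end = true the result is
// SnippetState::LineEnd("hello".to_string(), 6).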
fn is_line_feed(grapheme: &str) -> bool {
grapheme.as_bytes()[0] == b'\n'
}
fn is_whitespace(grapheme: &str) -> bool {
grapheme.chars().all(|c| c.is_whitespace())
}
fn is_punctuation(grapheme: &str) -> bool {
match grapheme.as_bytes()[0] {
b':' | b',' | b';' | b'.' => true,
_ => false,
}
}
#[cfg(test)]
mod test {
use super::{break_string, rewrite_string, SnippetState, StringFormat};
use shape::{Indent, Shape};
use unicode_segmentation::UnicodeSegmentation;
#[test]
fn issue343() {
let config = Default::default();
let fmt = StringFormat::new(Shape::legacy(2, Indent::empty()), &config);
rewrite_string("eq_", &fmt);
}
#[test]
fn should_break_on_whitespace() {
let string = "Placerat felis. Mauris porta ante sagittis purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat felis. ".to_string(), 16)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Placerat felis.".to_string(), 16)
);
}
#[test]
fn should_break_on_punctuation() {
let string = "Placerat_felis._Mauris_porta_ante_sagittis_purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat_felis.".to_string(), 15)
);
}
#[test]
fn should_break_forward() {
let string = "Venenatis_tellus_vel_tellus. Aliquam aliquam dolor at justo.";
let graphemes = UnicodeSegmentation:: | .checked_sub(self.opener.len() + self.line_end.len() + 1)?
+ 1,
)
}
| random_line_split |
server.rs | = async move {
let incoming = match config.bind.protocol {
Protocol::Udp => {
let server = udp::Server::bind(&addr).await?.build(receive);
Either::Left(server)
}
Protocol::Tcp => {
let server = tcp::Server::bind(&addr).await?.build(
Duration::from_secs(config.tcp_keep_alive_secs),
config.tcp_max_size_bytes as usize,
receive,
);
Either::Right(server)
}
};
let mut close = handle_rx.fuse();
let mut ctrl_c = ctrl_c.fuse();
let mut incoming = incoming.fuse();
// NOTE: We don't use `?` here because we never want to carry results
// We always want to match them and deal with error cases directly
loop {
select! {
// A message that's ready to process
msg = incoming.next() => match msg {
// A complete message has been received
Some(Ok(Received::Complete(msg))) => {
increment!(server.receive_ok);
// Process the received message
match process(msg) {
Ok(()) => {
increment!(server.process_ok);
}
Err(err) => {
increment!(server.process_err);
emit_err(&err, "GELF processing failed");
}
}
},
// A chunk of a message has been received
Some(Ok(Received::Incomplete)) => {
continue;
},
// An error occurred receiving a chunk
Some(Err(err)) => {
increment!(server.receive_err);
emit_err(&err, "GELF processing failed");
},
None => {
unreachable!("receiver stream should never terminate")
},
},
// A termination signal from the programmatic handle
_ = close => {
emit("Handle closed; shutting down");
break;
},
// A termination signal from the environment
_ = ctrl_c.next() => {
emit("Termination signal received; shutting down");
break;
},
};
}
emit("Stopping GELF server");
Result::Ok::<(), Error>(())
};
Ok(Server {
fut: Box::pin(async move {
if let Err(err) = server.await {
emit_err(&err, "GELF server failed");
}
}),
handle,
})
}
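// Hypothetical usage sketch -- `decode_chunk` and `forward` stand in for
// application-supplied functions and are not part of this module:
//
// let server = build(
//     config,
//     |bytes| decode_chunk(bytes),     // -> Result<Option<Message>, Error>
//     |msg| { forward(msg); Ok(()) },  // handle each complete Message
// )?;
// server.run()?;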
enum Received {
Incomplete,
Complete(Message),
}
trait OptionMessageExt {
fn into_received(self) -> Option<Received>;
}
impl OptionMessageExt for Option<Message> {
fn into_received(self) -> Option<Received> {
match self {
Some(msg) => Some(Received::Complete(msg)),
None => Some(Received::Incomplete),
}
}
}
mod udp {
use super::*;
use tokio::{
codec::Decoder,
net::udp::{
UdpFramed,
UdpSocket,
},
};
pub(super) struct Server(UdpSocket);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let sock = UdpSocket::bind(&addr).await?;
Ok(Server(sock))
}
pub(super) fn build(
self,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for UDP");
UdpFramed::new(self.0, Decode(receive)).map(|r| r.map(|(msg, _)| msg))
}
}
struct Decode<F>(F);
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// All datagrams are considered a valid message
let src = src.take().freeze();
Ok((self.0)(src)?.into_received())
}
}
}
mod tcp {
use super::*;
use std::{
cmp,
pin::Pin,
};
use futures::{
future,
stream::{
futures_unordered::FuturesUnordered,
Fuse,
Stream,
StreamFuture,
},
task::{
Context,
Poll,
},
};
use pin_utils::unsafe_pinned;
use tokio::{
codec::{
Decoder,
FramedRead,
},
net::tcp::TcpListener,
timer::Timeout,
};
pub(super) struct Server(TcpListener);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let listener = TcpListener::bind(&addr).await?;
Ok(Server(listener))
}
pub(super) fn build(
self,
keep_alive: Duration,
max_size_bytes: usize,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error>
+ Send
+ Sync
+ Unpin
+ Clone
+ 'static,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for TCP");
self.0
.incoming()
.filter_map(move |conn| {
match conn {
// The connection was successfully established
// Create a new protocol reader over it
// It'll get added to the connection pool
Ok(conn) => {
let decode = Decode::new(max_size_bytes, receive.clone());
let protocol = FramedRead::new(conn, decode);
// NOTE: The timeout stream wraps _the protocol_
// That means it'll close the connection if it doesn't
// produce a valid message within the timeframe, not just
// whether or not it writes to the stream
future::ready(Some(TimeoutStream::new(protocol, keep_alive)))
}
// The connection could not be established
// Just ignore it
Err(_) => future::ready(None),
}
})
.listen(1024)
}
}
struct Listen<S>
where
S: Stream,
S::Item: Stream,
{
accept: Fuse<S>,
connections: FuturesUnordered<StreamFuture<S::Item>>,
max: usize,
}
impl<S> Listen<S>
where
S: Stream,
S::Item: Stream,
{
unsafe_pinned!(accept: Fuse<S>);
unsafe_pinned!(connections: FuturesUnordered<StreamFuture<S::Item>>);
}
impl<S, T> Stream for Listen<S>
where
S: Stream + Unpin,
S::Item: Stream<Item = Result<T, Error>> + Unpin,
{
type Item = Result<T, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
'poll_conns: loop {
// Fill up our accepted connections
'fill_conns: while self.connections.len() < self.max {
let conn = match self.as_mut().accept().poll_next(cx) {
Poll::Ready(Some(s)) => s.into_future(),
Poll::Ready(None) | Poll::Pending => break 'fill_conns,
};
self.connections.push(conn);
}
// Try polling the stream
// NOTE: We're assuming the unordered list will
// always make forward progress polling futures
// even if one future is particularly chatty
match self.as_mut().connections().poll_next(cx) {
// We have an item from a connection
Poll::Ready(Some((Some(item), conn))) => {
match item {
// A valid item was produced
// Return it and put the connection back in the pool.
Ok(item) => {
self.connections.push(conn.into_future());
return Poll::Ready(Some(Ok(item)));
}
// An error occurred, probably IO-related
// In this case the connection isn't returned to the pool.
// It's closed on drop and the error is returned.
Err(err) => {
return Poll::Ready(Some(Err(err.into())));
}
}
}
// A connection has closed
// Drop the connection and loop back
// This will mean attempting to accept a new connection
Poll::Ready(Some((None, _conn))) => continue 'poll_conns,
// The queue is empty or nothing is ready
Poll::Ready(None) | Poll::Pending => break 'poll_conns,
}
}
// If we've gotten this far, then there are no events for us to process
// and nothing was ready, so figure out if we're not done yet or if
// we've reached the end.
if self.accept.is_done() {
Poll::Ready(None)
} else {
Poll::Pending
}
}
}
trait StreamListenExt: Stream {
fn listen(self, max_connections: usize) -> Listen<Self>
where
Self: Sized + Unpin,
Self::Item: Stream + Unpin,
{
Listen {
accept: self.fuse(),
connections: FuturesUnordered::new(),
max: max_connections,
}
}
}
impl<S> StreamListenExt for S where S: Stream {}
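// Hypothetical usage sketch (not from the original source): any stream whose
// items are themselves streams can bound its concurrency with `listen`:
//
// let merged = framed_connections.listen(1024);
// // yields items from at most 1024 live connections at once, accepting
// // further connections only as earlier ones close.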
struct Decode<F> {
max_size_bytes: usize,
read_head: usize,
discarding: bool,
receive: F,
}
impl<F> Decode<F> {
pub fn | new | identifier_name |
|
server.rs | (self) -> Result<(), Error> {
// Run the server on a fresh runtime
// We attempt to shut this runtime down cleanly to release
// any used resources
let runtime = Runtime::new().expect("failed to start new Runtime");
runtime.block_on(self.fut);
runtime.shutdown_now();
Ok(())
}
}
/**
A handle to a running GELF server that can be used to interact with it
programmatically.
*/
pub struct Handle {
close: oneshot::Sender<()>,
}
impl Handle {
/**
Close the server.
*/
pub fn close(self) -> bool {
self.close.send(()).is_ok()
}
}
/**
Build a server to receive GELF messages and process them.
*/
pub fn build(
config: Config,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Send + Sync + Unpin + Clone + 'static,
mut process: impl FnMut(Message) -> Result<(), Error> + Send + Sync + Unpin + Clone + 'static,
) -> Result<Server, Error> {
emit("Starting GELF server");
let addr = config.bind.addr.parse()?;
let (handle_tx, handle_rx) = oneshot::channel();
// Build a handle
let handle = Some(Handle { close: handle_tx });
let ctrl_c = ctrl_c()?;
let server = async move {
let incoming = match config.bind.protocol {
Protocol::Udp => {
let server = udp::Server::bind(&addr).await?.build(receive);
Either::Left(server)
}
Protocol::Tcp => {
let server = tcp::Server::bind(&addr).await?.build(
Duration::from_secs(config.tcp_keep_alive_secs),
config.tcp_max_size_bytes as usize,
receive,
);
Either::Right(server)
}
};
let mut close = handle_rx.fuse();
let mut ctrl_c = ctrl_c.fuse();
let mut incoming = incoming.fuse();
// NOTE: We don't use `?` here because we never want to carry results
// We always want to match them and deal with error cases directly
loop {
select! {
// A message that's ready to process
msg = incoming.next() => match msg {
// A complete message has been received
Some(Ok(Received::Complete(msg))) => {
increment!(server.receive_ok);
// Process the received message
match process(msg) {
Ok(()) => {
increment!(server.process_ok);
}
Err(err) => {
increment!(server.process_err);
emit_err(&err, "GELF processing failed");
}
}
},
// A chunk of a message has been received
Some(Ok(Received::Incomplete)) => {
continue;
},
// An error occurred receiving a chunk
Some(Err(err)) => {
increment!(server.receive_err);
emit_err(&err, "GELF processing failed");
},
None => {
unreachable!("receiver stream should never terminate")
},
},
// A termination signal from the programmatic handle
_ = close => {
emit("Handle closed; shutting down");
break;
},
// A termination signal from the environment
_ = ctrl_c.next() => {
emit("Termination signal received; shutting down");
break;
},
};
}
emit("Stopping GELF server");
Result::Ok::<(), Error>(())
};
Ok(Server {
fut: Box::pin(async move {
if let Err(err) = server.await {
emit_err(&err, "GELF server failed");
}
}),
handle,
})
}
enum Received {
Incomplete,
Complete(Message),
}
trait OptionMessageExt {
fn into_received(self) -> Option<Received>;
}
impl OptionMessageExt for Option<Message> {
fn into_received(self) -> Option<Received> {
match self {
Some(msg) => Some(Received::Complete(msg)),
None => Some(Received::Incomplete),
}
}
}
mod udp {
use super::*;
use tokio::{
codec::Decoder,
net::udp::{
UdpFramed,
UdpSocket,
},
};
pub(super) struct Server(UdpSocket);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let sock = UdpSocket::bind(&addr).await?;
Ok(Server(sock))
}
pub(super) fn build(
self,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
) -> impl Stream<Item = Result<Received, Error>> |
}
struct Decode<F>(F);
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// All datagrams are considered a valid message
let src = src.take().freeze();
Ok((self.0)(src)?.into_received())
}
}
}
mod tcp {
use super::*;
use std::{
cmp,
pin::Pin,
};
use futures::{
future,
stream::{
futures_unordered::FuturesUnordered,
Fuse,
Stream,
StreamFuture,
},
task::{
Context,
Poll,
},
};
use pin_utils::unsafe_pinned;
use tokio::{
codec::{
Decoder,
FramedRead,
},
net::tcp::TcpListener,
timer::Timeout,
};
pub(super) struct Server(TcpListener);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let listener = TcpListener::bind(&addr).await?;
Ok(Server(listener))
}
pub(super) fn build(
self,
keep_alive: Duration,
max_size_bytes: usize,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error>
+ Send
+ Sync
+ Unpin
+ Clone
+ 'static,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for TCP");
self.0
.incoming()
.filter_map(move |conn| {
match conn {
// The connection was successfully established
// Create a new protocol reader over it
// It'll get added to the connection pool
Ok(conn) => {
let decode = Decode::new(max_size_bytes, receive.clone());
let protocol = FramedRead::new(conn, decode);
// NOTE: The timeout stream wraps _the protocol_
// That means it'll close the connection if it doesn't
// produce a valid message within the timeframe, not just
// whether or not it writes to the stream
future::ready(Some(TimeoutStream::new(protocol, keep_alive)))
}
// The connection could not be established
// Just ignore it
Err(_) => future::ready(None),
}
})
.listen(1024)
}
}
struct Listen<S>
where
S: Stream,
S::Item: Stream,
{
accept: Fuse<S>,
connections: FuturesUnordered<StreamFuture<S::Item>>,
max: usize,
}
impl<S> Listen<S>
where
S: Stream,
S::Item: Stream,
{
unsafe_pinned!(accept: Fuse<S>);
unsafe_pinned!(connections: FuturesUnordered<StreamFuture<S::Item>>);
}
impl<S, T> Stream for Listen<S>
where
S: Stream + Unpin,
S::Item: Stream<Item = Result<T, Error>> + Unpin,
{
type Item = Result<T, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
'poll_conns: loop {
// Fill up our accepted connections
'fill_conns: while self.connections.len() < self.max {
let conn = match self.as_mut().accept().poll_next(cx) {
Poll::Ready(Some(s)) => s.into_future(),
Poll::Ready(None) | Poll::Pending => break 'fill_conns,
};
self.connections.push(conn);
}
// Try polling the stream
// NOTE: We're assuming the unordered list will
// always make forward progress polling futures
// even if one future is particularly chatty
match self.as_mut().connections().poll_next(cx) {
// We have an item from a connection
Poll::Ready(Some((Some(item), conn))) => {
match item {
// A valid item was produced
// Return it and put the connection back in the pool.
Ok(item) => {
self.connections.push(conn.into_future());
return Poll::Ready(Some(Ok(item)));
}
// An error occurred, probably IO-related
// In this case the connection isn't returned to the pool.
// It's closed on drop and the error is returned.
Err(err) => {
return Poll::Ready(Some( | {
emit("Setting up for UDP");
UdpFramed::new(self.0, Decode(receive)).map(|r| r.map(|(msg, _)| msg))
} | identifier_body |
server.rs | run(self) -> Result<(), Error> {
// Run the server on a fresh runtime
// We attempt to shut this runtime down cleanly to release
// any used resources
let runtime = Runtime::new().expect("failed to start new Runtime");
runtime.block_on(self.fut);
runtime.shutdown_now();
Ok(())
}
}
/**
A handle to a running GELF server that can be used to interact with it
programmatically.
*/
pub struct Handle {
close: oneshot::Sender<()>,
}
impl Handle {
/**
Close the server.
*/
pub fn close(self) -> bool {
self.close.send(()).is_ok()
}
}
/**
Build a server to receive GELF messages and process them.
*/
pub fn build(
config: Config,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Send + Sync + Unpin + Clone + 'static,
mut process: impl FnMut(Message) -> Result<(), Error> + Send + Sync + Unpin + Clone + 'static,
) -> Result<Server, Error> {
emit("Starting GELF server");
let addr = config.bind.addr.parse()?;
let (handle_tx, handle_rx) = oneshot::channel();
// Build a handle
let handle = Some(Handle { close: handle_tx });
let ctrl_c = ctrl_c()?;
let server = async move {
let incoming = match config.bind.protocol {
Protocol::Udp => {
let server = udp::Server::bind(&addr).await?.build(receive);
Either::Left(server)
}
Protocol::Tcp => {
let server = tcp::Server::bind(&addr).await?.build(
Duration::from_secs(config.tcp_keep_alive_secs),
config.tcp_max_size_bytes as usize,
receive,
);
Either::Right(server)
}
};
let mut close = handle_rx.fuse();
let mut ctrl_c = ctrl_c.fuse();
let mut incoming = incoming.fuse();
// NOTE: We don't use `?` here because we never want to carry results
// We always want to match them and deal with error cases directly
loop {
select! {
// A message that's ready to process
msg = incoming.next() => match msg {
// A complete message has been received | match process(msg) {
Ok(()) => {
increment!(server.process_ok);
}
Err(err) => {
increment!(server.process_err);
emit_err(&err, "GELF processing failed");
}
}
},
// A chunk of a message has been received
Some(Ok(Received::Incomplete)) => {
continue;
},
// An error occurred receiving a chunk
Some(Err(err)) => {
increment!(server.receive_err);
emit_err(&err, "GELF processing failed");
},
None => {
unreachable!("receiver stream should never terminate")
},
},
// A termination signal from the programmatic handle
_ = close => {
emit("Handle closed; shutting down");
break;
},
// A termination signal from the environment
_ = ctrl_c.next() => {
emit("Termination signal received; shutting down");
break;
},
};
}
emit("Stopping GELF server");
Result::Ok::<(), Error>(())
};
Ok(Server {
fut: Box::pin(async move {
if let Err(err) = server.await {
emit_err(&err, "GELF server failed");
}
}),
handle,
})
}
enum Received {
Incomplete,
Complete(Message),
}
trait OptionMessageExt {
fn into_received(self) -> Option<Received>;
}
impl OptionMessageExt for Option<Message> {
fn into_received(self) -> Option<Received> {
match self {
Some(msg) => Some(Received::Complete(msg)),
None => Some(Received::Incomplete),
}
}
}
mod udp {
use super::*;
use tokio::{
codec::Decoder,
net::udp::{
UdpFramed,
UdpSocket,
},
};
pub(super) struct Server(UdpSocket);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let sock = UdpSocket::bind(&addr).await?;
Ok(Server(sock))
}
pub(super) fn build(
self,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for UDP");
UdpFramed::new(self.0, Decode(receive)).map(|r| r.map(|(msg, _)| msg))
}
}
struct Decode<F>(F);
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// All datagrams are considered a valid message
let src = src.take().freeze();
Ok((self.0)(src)?.into_received())
}
}
}
mod tcp {
use super::*;
use std::{
cmp,
pin::Pin,
};
use futures::{
future,
stream::{
futures_unordered::FuturesUnordered,
Fuse,
Stream,
StreamFuture,
},
task::{
Context,
Poll,
},
};
use pin_utils::unsafe_pinned;
use tokio::{
codec::{
Decoder,
FramedRead,
},
net::tcp::TcpListener,
timer::Timeout,
};
pub(super) struct Server(TcpListener);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let listener = TcpListener::bind(&addr).await?;
Ok(Server(listener))
}
pub(super) fn build(
self,
keep_alive: Duration,
max_size_bytes: usize,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error>
+ Send
+ Sync
+ Unpin
+ Clone
+ 'static,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for TCP");
self.0
.incoming()
.filter_map(move |conn| {
match conn {
// The connection was successfully established
// Create a new protocol reader over it
// It'll get added to the connection pool
Ok(conn) => {
let decode = Decode::new(max_size_bytes, receive.clone());
let protocol = FramedRead::new(conn, decode);
// NOTE: The timeout stream wraps _the protocol_
// That means it'll close the connection if it doesn't
// produce a valid message within the timeframe, not just
// whether or not it writes to the stream
future::ready(Some(TimeoutStream::new(protocol, keep_alive)))
}
// The connection could not be established
// Just ignore it
Err(_) => future::ready(None),
}
})
.listen(1024)
}
}
struct Listen<S>
where
S: Stream,
S::Item: Stream,
{
accept: Fuse<S>,
connections: FuturesUnordered<StreamFuture<S::Item>>,
max: usize,
}
impl<S> Listen<S>
where
S: Stream,
S::Item: Stream,
{
unsafe_pinned!(accept: Fuse<S>);
unsafe_pinned!(connections: FuturesUnordered<StreamFuture<S::Item>>);
}
impl<S, T> Stream for Listen<S>
where
S: Stream + Unpin,
S::Item: Stream<Item = Result<T, Error>> + Unpin,
{
type Item = Result<T, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
'poll_conns: loop {
// Fill up our accepted connections
'fill_conns: while self.connections.len() < self.max {
let conn = match self.as_mut().accept().poll_next(cx) {
Poll::Ready(Some(s)) => s.into_future(),
Poll::Ready(None) | Poll::Pending => break 'fill_conns,
};
self.connections.push(conn);
}
// Try polling the stream
// NOTE: We're assuming the unordered list will
// always make forward progress polling futures
// even if one future is particularly chatty
match self.as_mut().connections().poll_next(cx) {
// We have an item from a connection
Poll::Ready(Some((Some(item), conn))) => {
match item {
// A valid item was produced
// Return it and put the connection back in the pool.
Ok(item) => {
self.connections.push(conn.into_future());
return Poll::Ready(Some(Ok(item)));
}
// An error occurred, probably IO-related
// In this case the connection isn't returned to the pool.
// It's closed on drop and the error is returned.
Err(err) => {
return Poll::Ready(Some(Err(err.into | Some(Ok(Received::Complete(msg))) => {
increment!(server.receive_ok);
// Process the received message | random_line_split |
remote.rs | io::copy(&mut incoming_r, &mut control_w);
match futures::future::join(join_1, join_2).await {
(Ok(_), Ok(_)) => {}
(Err(error), _) | (_, Err(error)) => {
tracing::error!(?error, "directing stream to control failed");
}
}
}
#[tracing::instrument(skip(socket))]
pub async fn accept_connection(socket: TcpStream) {
// peek the host of the http request
// if health check, then handle it and return
let StreamWithPeekedHost {
mut socket,
host,
forwarded_for,
} = match peek_http_request_host(socket).await {
Some(s) => s,
None => return,
};
tracing::info!(%host, %forwarded_for, "new remote connection");
// parse the host string and find our client
if CONFIG.allowed_hosts.contains(&host) {
error!("redirect to homepage");
let _ = socket.write_all(HTTP_REDIRECT_RESPONSE).await;
return;
}
let host = match validate_host_prefix(&host) {
Some(sub_domain) => sub_domain,
None => {
error!("invalid host specified");
let _ = socket.write_all(HTTP_INVALID_HOST_RESPONSE).await;
return;
}
};
// Special case -- we redirect this tcp connection to the control server
if host.as_str() == "wormhole" {
direct_to_control(socket).await;
return;
}
// find the client listening for this host
let client = match Connections::find_by_host(&host) {
Some(client) => client.clone(),
None => {
// check other instances that may be serving this host
match network::instance_for_host(&host).await {
Ok((instance, _)) => {
network::proxy_stream(instance, socket).await;
return;
}
Err(network::Error::DoesNotServeHost) => {
error!(%host, "no tunnel found");
let _ = socket.write_all(HTTP_NOT_FOUND_RESPONSE).await;
return;
}
Err(error) => {
error!(%host, ?error, "failed to find instance");
let _ = socket.write_all(HTTP_ERROR_LOCATING_HOST_RESPONSE).await;
return;
}
}
}
};
// allocate a new stream for this request
let (active_stream, queue_rx) = ActiveStream::new(client.clone());
let stream_id = active_stream.id.clone();
tracing::debug!(
stream_id = %active_stream.id.to_string(),
"new stream connected"
);
let (stream, sink) = tokio::io::split(socket);
// add our stream
ACTIVE_STREAMS.insert(stream_id.clone(), active_stream.clone());
// read from socket, write to client
let span = observability::remote_trace("process_tcp_stream");
tokio::spawn(
async move {
process_tcp_stream(active_stream, stream).await;
}
.instrument(span),
);
// read from client, write to socket
let span = observability::remote_trace("tunnel_to_stream");
tokio::spawn(
async move {
tunnel_to_stream(host, stream_id, sink, queue_rx).await;
}
.instrument(span),
);
}
fn validate_host_prefix(host: &str) -> Option<String> {
let url = format!("http://{}", host);
let host = match url::Url::parse(&url)
.map(|u| u.host().map(|h| h.to_owned()))
.unwrap_or(None)
{
Some(domain) => domain.to_string(),
None => {
error!("invalid host header");
return None;
}
};
let domain_segments = host.split(".").collect::<Vec<&str>>();
let prefix = &domain_segments[0];
let remaining = &domain_segments[1..].join(".");
if CONFIG.allowed_hosts.contains(remaining) {
Some(prefix.to_string())
} else {
None
}
}
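// Illustrative example (allowed_hosts contents assumed): if
// CONFIG.allowed_hosts contains "tunnelto.dev", then
// "mytunnel.tunnelto.dev" yields Some("mytunnel"), while "example.com"
// yields None because "com" is not an allowed host.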
/// Response Constants
const HTTP_REDIRECT_RESPONSE: &'static [u8] = b"HTTP/1.1 301 Moved Permanently\r\nLocation: https://tunnelto.dev/\r\nContent-Length: 20\r\n\r\nhttps://tunnelto.dev";
const HTTP_INVALID_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 400\r\nContent-Length: 23\r\n\r\nError: Invalid Hostname";
const HTTP_NOT_FOUND_RESPONSE: &'static [u8] =
b"HTTP/1.1 404\r\nContent-Length: 23\r\n\r\nError: Tunnel Not Found";
const HTTP_ERROR_LOCATING_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 27\r\n\r\nError: Error finding tunnel";
const HTTP_TUNNEL_REFUSED_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 32\r\n\r\nTunnel says: connection refused.";
const HTTP_OK_RESPONSE: &'static [u8] = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
const HEALTH_CHECK_PATH: &'static [u8] = b"/0xDEADBEEF_HEALTH_CHECK";
struct StreamWithPeekedHost {
socket: TcpStream,
host: String,
forwarded_for: String,
}
/// Filter incoming remote streams
#[tracing::instrument(skip(socket))]
async fn peek_http_request_host(mut socket: TcpStream) -> Option<StreamWithPeekedHost> {
/// Note that we bail out if the host header is not found
/// within the first 4 KB of the request.
const MAX_HEADER_PEAK: usize = 4096;
let mut buf = vec![0; MAX_HEADER_PEAK]; // 4 KB peek buffer
tracing::debug!("checking stream headers");
let n = match socket.peek(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket to determine host: {:?}", e);
return None;
}
};
// nothing was peeked, so there is nothing to parse
if n == 0 {
tracing::debug!("unable to peek header bytes");
return None;
}
tracing::debug!("peeked {} stream bytes ", n);
let mut headers = [httparse::EMPTY_HEADER; 64]; // 64 seems like a generous # of headers
let mut req = httparse::Request::new(&mut headers);
if let Err(e) = req.parse(&buf[..n]) {
error!("failed to parse incoming http bytes: {:?}", e);
return None;
}
// Handle the health check route
if req.path.map(|s| s.as_bytes()) == Some(HEALTH_CHECK_PATH) {
let _ = socket.write_all(HTTP_OK_RESPONSE).await.map_err(|e| {
error!("failed to write health_check: {:?}", e);
});
return None;
}
// get the ip addr in the header
let forwarded_for = if let Some(Ok(forwarded_for)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "x-forwarded-for".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
forwarded_for.to_string()
} else {
String::default()
};
// look for a host header
if let Some(Ok(host)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "host".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
tracing::info!(host=%host, path=%req.path.unwrap_or_default(), "peek request");
return Some(StreamWithPeekedHost {
socket,
host: host.to_string(),
forwarded_for,
});
}
tracing::info!("found no host header, dropping connection.");
None
}
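// Illustrative sketch (not from the original source): peeking a request like
//
//   GET /foo HTTP/1.1
//   Host: mytunnel.tunnelto.dev
//   X-Forwarded-For: 203.0.113.7
//
// yields host = "mytunnel.tunnelto.dev" and forwarded_for = "203.0.113.7",
// without consuming any bytes from the socket.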
/// Process Messages from the control path in & out of the remote stream
#[tracing::instrument(skip(tunnel_stream, tcp_stream))]
async fn process_tcp_stream(mut tunnel_stream: ActiveStream, mut tcp_stream: ReadHalf<TcpStream>) | error!("failed to read from tcp socket: {:?}", e);
return;
}
};
if n == 0 {
debug!("stream ended");
let _ = tunnel_stream
.client
.tx
.send(ControlPacket::End(tunnel_stream.id.clone()))
.await
.map_err(|e| {
error!("failed to send end signal: {:?}", e | {
// send initial control stream init to client
control_server::send_client_stream_init(tunnel_stream.clone()).await;
// now read from stream and forward to clients
let mut buf = [0; 1024];
loop {
// client is no longer connected
if Connections::get(&tunnel_stream.client.id).is_none() {
debug!("client disconnected, closing stream");
let _ = tunnel_stream.tx.send(StreamMessage::NoClientTunnel).await;
tunnel_stream.tx.close_channel();
return;
}
// read from stream
let n = match tcp_stream.read(&mut buf).await {
Ok(n) => n,
Err(e) => { | identifier_body |
remote.rs | io::copy(&mut incoming_r, &mut control_w);
match futures::future::join(join_1, join_2).await {
(Ok(_), Ok(_)) => {}
(Err(error), _) | (_, Err(error)) => {
tracing::error!(?error, "directing stream to control failed");
}
}
}
#[tracing::instrument(skip(socket))]
pub async fn accept_connection(socket: TcpStream) {
// peek the host of the http request
// if health check, then handle it and return
let StreamWithPeekedHost {
mut socket,
host,
forwarded_for,
} = match peek_http_request_host(socket).await {
Some(s) => s,
None => return,
};
tracing::info!(%host, %forwarded_for, "new remote connection");
// parse the host string and find our client
if CONFIG.allowed_hosts.contains(&host) {
error!("redirect to homepage");
let _ = socket.write_all(HTTP_REDIRECT_RESPONSE).await;
return;
}
let host = match validate_host_prefix(&host) {
Some(sub_domain) => sub_domain,
None => {
error!("invalid host specified");
let _ = socket.write_all(HTTP_INVALID_HOST_RESPONSE).await;
return;
}
};
// Special case -- we redirect this tcp connection to the control server
if host.as_str() == "wormhole" {
direct_to_control(socket).await;
return;
}
// find the client listening for this host
let client = match Connections::find_by_host(&host) {
Some(client) => client.clone(),
None => {
// check other instances that may be serving this host
match network::instance_for_host(&host).await {
Ok((instance, _)) => {
network::proxy_stream(instance, socket).await;
return;
}
Err(network::Error::DoesNotServeHost) => {
error!(%host, "no tunnel found");
let _ = socket.write_all(HTTP_NOT_FOUND_RESPONSE).await;
return;
}
Err(error) => {
error!(%host, ?error, "failed to find instance");
let _ = socket.write_all(HTTP_ERROR_LOCATING_HOST_RESPONSE).await;
return;
}
}
}
};
// allocate a new stream for this request
let (active_stream, queue_rx) = ActiveStream::new(client.clone());
let stream_id = active_stream.id.clone();
tracing::debug!(
stream_id = %active_stream.id.to_string(),
"new stream connected"
);
let (stream, sink) = tokio::io::split(socket);
// add our stream
ACTIVE_STREAMS.insert(stream_id.clone(), active_stream.clone());
// read from socket, write to client
let span = observability::remote_trace("process_tcp_stream");
tokio::spawn(
async move {
process_tcp_stream(active_stream, stream).await;
}
.instrument(span),
);
// read from client, write to socket
let span = observability::remote_trace("tunnel_to_stream");
tokio::spawn(
async move {
tunnel_to_stream(host, stream_id, sink, queue_rx).await;
}
.instrument(span),
);
}
fn validate_host_prefix(host: &str) -> Option<String> {
let url = format!("http://{}", host);
let host = match url::Url::parse(&url)
.map(|u| u.host().map(|h| h.to_owned()))
.unwrap_or(None)
{
Some(domain) => domain.to_string(),
None => {
error!("invalid host header");
return None;
}
};
let domain_segments = host.split(".").collect::<Vec<&str>>();
let prefix = &domain_segments[0];
let remaining = &domain_segments[1..].join(".");
if CONFIG.allowed_hosts.contains(remaining) {
Some(prefix.to_string())
} else {
None
}
}
/// Response Constants
const HTTP_REDIRECT_RESPONSE: &'static [u8] = b"HTTP/1.1 301 Moved Permanently\r\nLocation: https://tunnelto.dev/\r\nContent-Length: 20\r\n\r\nhttps://tunnelto.dev";
const HTTP_INVALID_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 400\r\nContent-Length: 23\r\n\r\nError: Invalid Hostname";
const HTTP_NOT_FOUND_RESPONSE: &'static [u8] =
b"HTTP/1.1 404\r\nContent-Length: 23\r\n\r\nError: Tunnel Not Found";
const HTTP_ERROR_LOCATING_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 27\r\n\r\nError: Error finding tunnel";
const HTTP_TUNNEL_REFUSED_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 32\r\n\r\nTunnel says: connection refused.";
const HTTP_OK_RESPONSE: &'static [u8] = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
const HEALTH_CHECK_PATH: &'static [u8] = b"/0xDEADBEEF_HEALTH_CHECK";
struct StreamWithPeekedHost {
socket: TcpStream,
host: String,
forwarded_for: String,
}
/// Filter incoming remote streams
#[tracing::instrument(skip(socket))]
async fn peek_http_request_host(mut socket: TcpStream) -> Option<StreamWithPeekedHost> {
/// Note that we bail out if the host header is not found
/// within the first 4 KB of the request.
const MAX_HEADER_PEAK: usize = 4096;
let mut buf = vec![0; MAX_HEADER_PEAK]; // 4 KB peek buffer
tracing::debug!("checking stream headers");
let n = match socket.peek(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket to determine host: {:?}", e);
return None;
}
};
// nothing was peeked, so there is nothing to parse
if n == 0 {
tracing::debug!("unable to peek header bytes");
return None;
}
tracing::debug!("peeked {} stream bytes ", n);
let mut headers = [httparse::EMPTY_HEADER; 64]; // 64 seems like a generous # of headers
let mut req = httparse::Request::new(&mut headers);
if let Err(e) = req.parse(&buf[..n]) {
error!("failed to parse incoming http bytes: {:?}", e);
return None;
}
// Handle the health check route
if req.path.map(|s| s.as_bytes()) == Some(HEALTH_CHECK_PATH) {
let _ = socket.write_all(HTTP_OK_RESPONSE).await.map_err(|e| {
error!("failed to write health_check: {:?}", e);
});
return None;
}
// get the ip addr in the header
let forwarded_for = if let Some(Ok(forwarded_for)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "x-forwarded-for".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
forwarded_for.to_string()
} else { | // look for a host header
if let Some(Ok(host)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "host".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
tracing::info!(host=%host, path=%req.path.unwrap_or_default(), "peek request");
return Some(StreamWithPeekedHost {
socket,
host: host.to_string(),
forwarded_for,
});
}
tracing::info!("found no host header, dropping connection.");
None
}
/// Process Messages from the control path in & out of the remote stream
#[tracing::instrument(skip(tunnel_stream, tcp_stream))]
async fn process_tcp_stream(mut tunnel_stream: ActiveStream, mut tcp_stream: ReadHalf<TcpStream>) {
// send initial control stream init to client
control_server::send_client_stream_init(tunnel_stream.clone()).await;
// now read from stream and forward to clients
let mut buf = [0; 1024];
loop {
// client is no longer connected
if Connections::get(&tunnel_stream.client.id).is_none() {
debug!("client disconnected, closing stream");
let _ = tunnel_stream.tx.send(StreamMessage::NoClientTunnel).await;
tunnel_stream.tx.close_channel();
return;
}
// read from stream
let n = match tcp_stream.read(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket: {:?}", e);
return;
}
};
if n == 0 {
debug!("stream ended");
let _ = tunnel_stream
.client
.tx
.send(ControlPacket::End(tunnel_stream.id.clone()))
.await
.map_err(|e| {
error!("failed to send end signal: {:?}", e);
| String::default()
};
| random_line_split |
remote.rs | io::copy(&mut incoming_r, &mut control_w);
match futures::future::join(join_1, join_2).await {
(Ok(_), Ok(_)) => {}
(Err(error), _) | (_, Err(error)) => {
tracing::error!(?error, "directing stream to control failed");
}
}
}
#[tracing::instrument(skip(socket))]
pub async fn accept_connection(socket: TcpStream) {
// peek the host of the http request
// if health check, then handle it and return
let StreamWithPeekedHost {
mut socket,
host,
forwarded_for,
} = match peek_http_request_host(socket).await {
Some(s) => s,
None => return,
};
tracing::info!(%host, %forwarded_for, "new remote connection");
// parse the host string and find our client
if CONFIG.allowed_hosts.contains(&host) {
error!("redirect to homepage");
let _ = socket.write_all(HTTP_REDIRECT_RESPONSE).await;
return;
}
let host = match validate_host_prefix(&host) {
Some(sub_domain) => sub_domain,
None => {
error!("invalid host specified");
let _ = socket.write_all(HTTP_INVALID_HOST_RESPONSE).await;
return;
}
};
// Special case -- we redirect this tcp connection to the control server
if host.as_str() == "wormhole" {
direct_to_control(socket).await;
return;
}
// find the client listening for this host
let client = match Connections::find_by_host(&host) {
Some(client) => client.clone(),
None => {
// check other instances that may be serving this host
match network::instance_for_host(&host).await {
Ok((instance, _)) => {
network::proxy_stream(instance, socket).await;
return;
}
Err(network::Error::DoesNotServeHost) => {
error!(%host, "no tunnel found");
let _ = socket.write_all(HTTP_NOT_FOUND_RESPONSE).await;
return;
}
Err(error) => {
error!(%host, ?error, "failed to find instance");
let _ = socket.write_all(HTTP_ERROR_LOCATING_HOST_RESPONSE).await;
return;
}
}
}
};
// allocate a new stream for this request
let (active_stream, queue_rx) = ActiveStream::new(client.clone());
let stream_id = active_stream.id.clone();
tracing::debug!(
stream_id = %active_stream.id.to_string(),
"new stream connected"
);
let (stream, sink) = tokio::io::split(socket);
// add our stream
ACTIVE_STREAMS.insert(stream_id.clone(), active_stream.clone());
// read from socket, write to client
let span = observability::remote_trace("process_tcp_stream");
tokio::spawn(
async move {
process_tcp_stream(active_stream, stream).await;
}
.instrument(span),
);
// read from client, write to socket
let span = observability::remote_trace("tunnel_to_stream");
tokio::spawn(
async move {
tunnel_to_stream(host, stream_id, sink, queue_rx).await;
}
.instrument(span),
);
}
fn validate_host_prefix(host: &str) -> Option<String> {
let url = format!("http://{}", host);
let host = match url::Url::parse(&url)
.map(|u| u.host().map(|h| h.to_owned()))
.unwrap_or(None)
{
Some(domain) => domain.to_string(),
None => {
error!("invalid host header");
return None;
}
};
let domain_segments = host.split(".").collect::<Vec<&str>>();
let prefix = &domain_segments[0];
let remaining = &domain_segments[1..].join(".");
if CONFIG.allowed_hosts.contains(remaining) {
Some(prefix.to_string())
} else {
None
}
}
/// Response Constants
const HTTP_REDIRECT_RESPONSE: &'static [u8] = b"HTTP/1.1 301 Moved Permanently\r\nLocation: https://tunnelto.dev/\r\nContent-Length: 20\r\n\r\nhttps://tunnelto.dev";
const HTTP_INVALID_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 400\r\nContent-Length: 23\r\n\r\nError: Invalid Hostname";
const HTTP_NOT_FOUND_RESPONSE: &'static [u8] =
b"HTTP/1.1 404\r\nContent-Length: 23\r\n\r\nError: Tunnel Not Found";
const HTTP_ERROR_LOCATING_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 27\r\n\r\nError: Error finding tunnel";
const HTTP_TUNNEL_REFUSED_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 32\r\n\r\nTunnel says: connection refused.";
const HTTP_OK_RESPONSE: &'static [u8] = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
const HEALTH_CHECK_PATH: &'static [u8] = b"/0xDEADBEEF_HEALTH_CHECK";
struct StreamWithPeekedHost {
socket: TcpStream,
host: String,
forwarded_for: String,
}
/// Filter incoming remote streams
#[tracing::instrument(skip(socket))]
async fn peek_http_request_host(mut socket: TcpStream) -> Option<StreamWithPeekedHost> {
/// Note that we bail out if the host header is not found
/// within the first 4 KB of the request.
const MAX_HEADER_PEAK: usize = 4096;
let mut buf = vec![0; MAX_HEADER_PEAK]; // 4 KB peek buffer
tracing::debug!("checking stream headers");
let n = match socket.peek(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket to determine host: {:?}", e);
return None;
}
};
// nothing was peeked, so there is nothing to parse
if n == 0 {
tracing::debug!("unable to peek header bytes");
return None;
}
tracing::debug!("peeked {} stream bytes ", n);
let mut headers = [httparse::EMPTY_HEADER; 64]; // 64 seems like a generous # of headers
let mut req = httparse::Request::new(&mut headers);
if let Err(e) = req.parse(&buf[..n]) {
error!("failed to parse incoming http bytes: {:?}", e);
return None;
}
// Handle the health check route
if req.path.map(|s| s.as_bytes()) == Some(HEALTH_CHECK_PATH) {
let _ = socket.write_all(HTTP_OK_RESPONSE).await.map_err(|e| {
error!("failed to write health_check: {:?}", e);
});
return None;
}
// get the ip addr in the header
let forwarded_for = if let Some(Ok(forwarded_for)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "x-forwarded-for".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
forwarded_for.to_string()
} else {
String::default()
};
// look for a host header
if let Some(Ok(host)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "host".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
tracing::info!(host=%host, path=%req.path.unwrap_or_default(), "peek request");
return Some(StreamWithPeekedHost {
socket,
host: host.to_string(),
forwarded_for,
});
}
tracing::info!("found no host header, dropping connection.");
None
}
/// Process Messages from the control path in & out of the remote stream
#[tracing::instrument(skip(tunnel_stream, tcp_stream))]
async fn | (mut tunnel_stream: ActiveStream, mut tcp_stream: ReadHalf<TcpStream>) {
// send initial control stream init to client
control_server::send_client_stream_init(tunnel_stream.clone()).await;
// now read from stream and forward to clients
let mut buf = [0; 1024];
loop {
// client is no longer connected
if Connections::get(&tunnel_stream.client.id).is_none() {
debug!("client disconnected, closing stream");
let _ = tunnel_stream.tx.send(StreamMessage::NoClientTunnel).await;
tunnel_stream.tx.close_channel();
return;
}
// read from stream
let n = match tcp_stream.read(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket: {:?}", e);
return;
}
};
if n == 0 {
debug!("stream ended");
let _ = tunnel_stream
.client
.tx
.send(ControlPacket::End(tunnel_stream.id.clone()))
.await
.map_err(|e| {
error!("failed to send end signal: {:?}", e | process_tcp_stream | identifier_name |
file-record.ts | DummyFile {
name: string;
size: number;
type: string;
lastModified: number;
lastModifiedDate: Date;
}
interface UploadData {
data: any;
error: string | false;
}
export { Dimensions, Options, RawFileRecord };
class FileRecord {
public static getFromRaw(
rawFileRecord: RawFileRecord,
options: Options,
isSync = false
): FileRecord | Promise<FileRecord> {
const fileRecord = new FileRecord(rawFileRecord, options);
const promise = fileRecord.setUrl(rawFileRecord.url as string);
rawFileRecord.progress = fileRecord.progress.bind(fileRecord); // expose it as a function
rawFileRecord.src = fileRecord.src.bind(fileRecord);
rawFileRecord.name = fileRecord.name.bind(fileRecord); // expose it as a function
if (isSync) {
return fileRecord;
}
return promise;
}
public static fromRaw(rawFileRecord: RawFileRecord, options: Options): Promise<FileRecord> {
return FileRecord.getFromRaw(rawFileRecord, options, false) as Promise<FileRecord>;
}
public static fromRawSync(rawFileRecord: RawFileRecord, options: Options): FileRecord {
return FileRecord.getFromRaw(rawFileRecord, options, true) as FileRecord;
}
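// Hypothetical usage sketch -- the raw fields and options shown are
// illustrative assumptions, not values taken from this codebase:
//
// const record = FileRecord.fromRawSync(
//   { name: 'photo.jpg', size: 2048, type: 'image/jpeg', lastModified: Date.now() } as RawFileRecord,
//   { read: false, thumbnailSize: 360 } as Options,
// );
// record.ext();     // 'jpg'
// record.isImage(); // true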
public static fromRawArray(rawFileRecords: RawFileRecord[], options: Options): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const rawFileRecord of rawFileRecords) {
promises.push(FileRecord.fromRaw(rawFileRecord, options));
}
return Promise.all(promises);
}
public static toRawArray(fileRecords: FileRecord[]): RawFileRecord[] {
const rawFileRecords: RawFileRecord[] = [];
for (const fileRecord of fileRecords) {
rawFileRecords.push(fileRecord.toRaw());
}
return rawFileRecords;
}
public static readFile(fileRecord: FileRecord): Promise<FileRecord> {
return new Promise((resolve, reject) => {
if (!fileRecord.read) {
fileRecord.setUrl(null).then(
() => {
resolve(fileRecord);
},
(err) => {
// ignore error
resolve(fileRecord);
}
);
return;
}
utils.getDataURL(fileRecord.file).then((dataUrl) => {
fileRecord.setUrl(dataUrl).then(() => {
resolve(fileRecord);
}, reject);
}, reject);
});
}
public static readFiles(fileRecords: FileRecord[]): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const fileRecord of fileRecords) {
promises.push(FileRecord.readFile(fileRecord));
}
return Promise.all(promises);
}
public urlValue: null | string = null;
public urlResized: null | string = null;
public image: HTMLImageElement | {} = {};
public isPlayingAv: boolean = false;
public oldFileName: string | null = null;
public oldCustomName: string | null = null;
public upload: UploadData = { data: null, error: false };
public raw: RawFileRecord;
public progressInternal: number;
public accept?: string;
public dimensions: Dimensions;
public error: false | ErrorFlags;
public file: File;
public height: undefined | number | string;
public width: undefined | number | string;
public id: string;
public imageColor?: RGBA;
public lastKnownSrc: null | string;
public maxSize?: string;
public options: Options;
public read: boolean;
public thumbnailSize: number;
public videoThumbnail: any;
public customName: any;
public xhr?: XMLHttpRequest;
public xhrQueue?: () => any;
public stopAv?: (() => any) | null;
public tusUpload?: any;
public calculateAverageColor: boolean;
public constructor(data: RawFileRecord, options: Options) {
this.urlValue = null;
this.urlResized = null;
this.lastKnownSrc = null;
this.image = {};
this.isPlayingAv = false;
this.oldFileName = null;
this.oldCustomName = null;
this.raw = data;
this.file = data.file instanceof File ? data.file : (this.createDummyFile(data) as any);
this.progressInternal = !isNaN(data.progress as number) ? (data.progress as number) : 0;
// this.width = FileRecord.defaultWidth;
// this.height = FileRecord.defaultHeight;
this.thumbnailSize = options.thumbnailSize || 360;
this.read = !!options.read;
this.dimensions = data.dimensions || { width: 0, height: 0 };
this.dimensions.width = this.dimensions.width || 0;
this.dimensions.height = this.dimensions.height || 0;
this.error = data.error || false;
this.options = options;
this.maxSize = options.maxSize;
this.accept = options.accept;
this.id = Math.random() + ':' + new Date().getTime();
this.videoThumbnail = data.videoThumbnail;
this.imageColor = data.imageColor;
this.customName = data.customName;
this.calculateAverageColor = options.averageColor !== undefined ? options.averageColor : true;
this.validate();
}
// populate(data, options = {}) {}
public createDummyFile(data: RawFileRecord): DummyFile {
const file: DummyFile = {} as DummyFile;
file.lastModified = data.lastModified;
const d = new Date();
if (file.lastModified) {
d.setTime(file.lastModified);
}
file.lastModifiedDate = d;
file.name = typeof data.name === 'function' ? data.name() : data.name;
file.size = data.size;
file.type = data.type;
return file;
}
public hasProgress(): boolean {
return !isNaN(this.progressInternal); // && this.progressInternal <= 100;
}
public progress(value?: number): number | void {
if (value !== undefined) {
this.progressInternal = value;
return;
}
return this.progressInternal || 0;
}
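// Note: progress() doubles as getter and setter -- record.progress(42)
// stores a value; record.progress() with no argument reads it back
// (defaulting to 0).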
public url(value?: string): string | undefined | Promise<this> {
if (value !== undefined) {
return this.setUrl(value);
}
return this.urlValue || undefined;
}
public src(): string {
if (this.isImage()) {
return this.urlResized || this.urlValue || (this.file as any).url;
}
if (this.isPlayableVideo()) {
return this.videoThumbnail || '';
}
return '';
}
public size(): string {
if (!this.file) {
return '';
}
return utils.getSizeFormatted(this.file.size);
}
public ext(): string {
if (this.file && this.file.name.indexOf('.') !== -1) {
return (this.file.name as any).split('.').pop();
}
return '?';
// return this.file.type.split('/').shift();
}
public name(withoutExt?: boolean): string {
const ext = this.ext();
if (this.customName) {
return this.customName + (withoutExt ? '' : ext !== '?' ? '.' + ext : '');
}
const name = this.file && this.file.name;
if (withoutExt) {
if (ext !== '?') {
return name.substr(0, name.length - (ext.length + 1));
}
}
return name;
}
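// e.g. (illustrative): for a file named 'report.final.pdf', name() returns
// 'report.final.pdf', name(true) returns 'report.final', and ext() returns 'pdf'.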
public isDarkColor(): boolean {
if (this.imageColor) {
const rgb = this.imageColor;
const darkPoint = 20;
return rgb[0] <= darkPoint && rgb[1] <= darkPoint && rgb[2] <= darkPoint;
}
return false;
}
public color(): string {
if (this.imageColor) {
const rgb = this.imageColor;
return 'rgb(' + rgb[0] + ', ' + rgb[1] + ', ' + rgb[2] + ')';
}
if (this.isImage()) {
return 'transparent';
}
const ext = this.ext();
const svgIcon = this.icon();
// var svgIcon = getIconFromExt(ext);
if (svgIcon.color) {
return svgIcon.color;
}
return utils.getColorForText(ext);
}
public isImage(): boolean {
return this.file && !!this.file.type.match(/image((?!vnd).)*$/i);
}
public isVideo(): boolean {
return this.file && this.file.type.indexOf('video') !== -1;
}
public isPlayableVideo(): boolean {
return this.icon().category === 'video-playable';
}
public isAudio(): boolean {
return this.file && this.file.type.indexOf('audio') !== -1;
}
public isPlayableAudio(): boolean {
return this.icon().category === 'audio-playable';
}
public isText(): boolean {
return this.file && this.file.type.indexOf('text') !== -1;
}
public setUrl(url: string | null): Promise<this> {
this.urlValue = url;
return new Promise((resolve, reject) => {
if (this.isImage()) {
this.resizeImage().then(
() => {
resolve(this);
},
(err) => {
resolve(this);
}
);
return;
}
resolve(this);
});
}
public | (resized: ImageThumbnail | | imageResized | identifier_name |
file-record.ts | DummyFile {
name: string;
size: number;
type: string;
lastModified: number;
lastModifiedDate: Date;
}
interface UploadData {
data: any;
error: string | false;
}
export { Dimensions, Options, RawFileRecord };
class FileRecord {
public static getFromRaw(
rawFileRecord: RawFileRecord,
options: Options,
isSync = false
): FileRecord | Promise<FileRecord> {
const fileRecord = new FileRecord(rawFileRecord, options);
const promise = fileRecord.setUrl(rawFileRecord.url as string);
rawFileRecord.progress = fileRecord.progress.bind(fileRecord); // expose it as a function
rawFileRecord.src = fileRecord.src.bind(fileRecord);
rawFileRecord.name = fileRecord.name.bind(fileRecord); // expose it as a function
if (isSync) {
return fileRecord;
}
return promise;
}
public static fromRaw(rawFileRecord: RawFileRecord, options: Options): Promise<FileRecord> {
return FileRecord.getFromRaw(rawFileRecord, options, false) as Promise<FileRecord>;
}
public static fromRawSync(rawFileRecord: RawFileRecord, options: Options): FileRecord {
return FileRecord.getFromRaw(rawFileRecord, options, true) as FileRecord;
}
public static fromRawArray(rawFileRecords: RawFileRecord[], options: Options): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const rawFileRecord of rawFileRecords) {
promises.push(FileRecord.fromRaw(rawFileRecord, options));
}
return Promise.all(promises);
}
public static toRawArray(fileRecords: FileRecord[]): RawFileRecord[] {
const rawFileRecords: RawFileRecord[] = [];
for (const fileRecord of fileRecords) {
rawFileRecords.push(fileRecord.toRaw());
}
return rawFileRecords;
}
public static readFile(fileRecord: FileRecord): Promise<FileRecord> {
return new Promise((resolve, reject) => {
if (!fileRecord.read) {
fileRecord.setUrl(null).then(
() => {
resolve(fileRecord);
},
(err) => {
// ignore error
resolve(fileRecord);
}
);
return;
}
utils.getDataURL(fileRecord.file).then((dataUrl) => {
fileRecord.setUrl(dataUrl).then(() => {
resolve(fileRecord);
}, reject);
}, reject);
});
}
public static readFiles(fileRecords: FileRecord[]): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const fileRecord of fileRecords) {
promises.push(FileRecord.readFile(fileRecord));
}
return Promise.all(promises);
}
public urlValue: null | string = null;
public urlResized: null | string = null;
public image: HTMLImageElement | {} = {};
public isPlayingAv: boolean = false;
public oldFileName: string | null = null;
public oldCustomName: string | null = null;
public upload: UploadData = { data: null, error: false };
public raw: RawFileRecord;
public progressInternal: number;
public accept?: string;
public dimensions: Dimensions;
public error: false | ErrorFlags;
public file: File;
public height: undefined | number | string;
public width: undefined | number | string;
public id: string;
public imageColor?: RGBA;
public lastKnownSrc: null | string;
public maxSize?: string;
public options: Options;
public read: boolean;
public thumbnailSize: number;
public videoThumbnail: any;
public customName: any;
public xhr?: XMLHttpRequest;
public xhrQueue?: () => any;
public stopAv?: (() => any) | null;
public tusUpload?: any;
public calculateAverageColor: boolean;
public constructor(data: RawFileRecord, options: Options) {
this.urlValue = null;
this.urlResized = null;
this.lastKnownSrc = null;
this.image = {};
this.isPlayingAv = false;
this.oldFileName = null;
this.oldCustomName = null;
this.raw = data;
this.file = data.file instanceof File ? data.file : (this.createDummyFile(data) as any);
this.progressInternal = !isNaN(data.progress as number) ? (data.progress as number) : 0;
// this.width = FileRecord.defaultWidth;
// this.height = FileRecord.defaultHeight;
this.thumbnailSize = options.thumbnailSize || 360;
this.read = !!options.read;
this.dimensions = data.dimensions || { width: 0, height: 0 };
this.dimensions.width = this.dimensions.width || 0;
this.dimensions.height = this.dimensions.height || 0;
this.error = data.error || false;
this.options = options;
this.maxSize = options.maxSize;
this.accept = options.accept;
this.id = Math.random() + ':' + new Date().getTime();
this.videoThumbnail = data.videoThumbnail;
this.imageColor = data.imageColor;
this.customName = data.customName;
this.calculateAverageColor = options.averageColor !== undefined ? options.averageColor : true;
this.validate();
}
// populate(data, options = {}) {}
public createDummyFile(data: RawFileRecord): DummyFile {
const file: DummyFile = {} as DummyFile;
file.lastModified = data.lastModified;
const d = new Date();
if (file.lastModified) {
d.setTime(file.lastModified);
}
file.lastModifiedDate = d;
file.name = typeof data.name === 'function' ? data.name() : data.name;
file.size = data.size;
file.type = data.type;
return file;
}
public hasProgress(): boolean {
return !isNaN(this.progressInternal); // && this._progress <= 100;
}
public progress(value?: number): number | void {
if (value !== undefined) {
this.progressInternal = value;
return;
}
return this.progressInternal || 0;
}
public url(value?: string): string | undefined | Promise<this> {
if (value !== undefined) {
return this.setUrl(value);
}
return this.urlValue || undefined;
}
public src(): string {
if (this.isImage()) {
return this.urlResized || this.urlValue || (this.file as any).url;
}
if (this.isPlayableVideo()) {
return this.videoThumbnail || '';
}
return '';
}
public size(): string {
if (!this.file) {
return '';
}
return utils.getSizeFormatted(this.file.size);
}
public ext(): string {
if (this.file && this.file.name.indexOf('.') !== -1) {
return (this.file.name as any).split('.').pop();
}
return '?';
// return this.file.type.split('/').shift();
}
public name(withoutExt?: boolean): string {
const ext = this.ext();
if (this.customName) {
return this.customName + (withoutExt ? '' : ext !== '?' ? '.' + ext : '');
}
const name = this.file && this.file.name;
if (withoutExt) {
if (ext !== '?') {
return name.substr(0, name.length - (ext.length + 1));
}
}
return name;
}
public isDarkColor(): boolean {
if (this.imageColor) {
const rgb = this.imageColor;
const darkPoint = 20;
return rgb[0] <= darkPoint && rgb[1] <= darkPoint && rgb[2] <= darkPoint;
}
return false;
}
public color(): string {
if (this.imageColor) {
const rgb = this.imageColor;
return 'rgb(' + rgb[0] + ', ' + rgb[1] + ', ' + rgb[2] + ')';
}
if (this.isImage()) {
return 'transparent';
}
const ext = this.ext();
const svgIcon = this.icon();
// var svgIcon = getIconFromExt(ext);
if (svgIcon.color) {
return svgIcon.color;
}
return utils.getColorForText(ext);
}
public isImage(): boolean {
return this.file && !!this.file.type.match(/image((?!vnd).)*$/i); |
public isPlayableVideo(): boolean {
return this.icon().category === 'video-playable';
}
public isAudio(): boolean {
return this.file && this.file.type.indexOf('audio') !== -1;
}
public isPlayableAudio(): boolean {
return this.icon().category === 'audio-playable';
}
public isText(): boolean {
return this.file && this.file.type.indexOf('text') !== -1;
}
public setUrl(url: string | null): Promise<this> {
this.urlValue = url;
return new Promise((resolve, reject) => {
if (this.isImage()) {
this.resizeImage().then(
() => {
resolve(this);
},
(err) => {
resolve(this);
}
);
return;
}
resolve(this);
});
}
public imageResized(resized: ImageThumbnail | | }
public isVideo(): boolean {
return this.file && this.file.type.indexOf('video') !== -1;
} | random_line_split |
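This row is a random_line_split sample: the cut falls on whole-line boundaries, so the middle is the run of lines shown just before the label — the closing brace of isImage plus the entire isVideo method, exactly the chunk missing between those two methods earlier in the prefix cell. A sketch of producing that kind of split; the middle-size bound is an arbitrary assumption and the real extraction pipeline is not part of this dump:

// Split source at random line boundaries into prefix / middle / suffix.
function randomLineSplit(
  source: string,
  maxMiddleLines = 4,
): { prefix: string; middle: string; suffix: string } {
  const lines = source.split('\n');
  const start = Math.floor(Math.random() * lines.length);
  const count = 1 + Math.floor(Math.random() * Math.min(maxMiddleLines, lines.length - start));
  return {
    prefix: lines.slice(0, start).join('\n'),
    middle: lines.slice(start, start + count).join('\n'),
    suffix: lines.slice(start + count).join('\n'),
  };
}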
file-record.ts | File {
name: string;
size: number;
type: string;
lastModified: number;
lastModifiedDate: Date;
}
interface UploadData {
data: any;
error: string | false;
}
export { Dimensions, Options, RawFileRecord };
class FileRecord {
public static getFromRaw(
rawFileRecord: RawFileRecord,
options: Options,
isSync = false
): FileRecord | Promise<FileRecord> {
const fileRecord = new FileRecord(rawFileRecord, options);
const promise = fileRecord.setUrl(rawFileRecord.url as string);
rawFileRecord.progress = fileRecord.progress.bind(fileRecord); // expose progress as a bound function
rawFileRecord.src = fileRecord.src.bind(fileRecord);
rawFileRecord.name = fileRecord.name.bind(fileRecord); // expose name as a bound function
if (isSync) {
return fileRecord;
}
return promise;
}
public static fromRaw(rawFileRecord: RawFileRecord, options: Options): Promise<FileRecord> {
return FileRecord.getFromRaw(rawFileRecord, options, false) as Promise<FileRecord>;
}
public static fromRawSync(rawFileRecord: RawFileRecord, options: Options): FileRecord {
return FileRecord.getFromRaw(rawFileRecord, options, true) as FileRecord;
}
public static fromRawArray(rawFileRecords: RawFileRecord[], options: Options): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const rawFileRecord of rawFileRecords) {
promises.push(FileRecord.fromRaw(rawFileRecord, options));
}
return Promise.all(promises);
}
public static toRawArray(fileRecords: FileRecord[]): RawFileRecord[] {
const rawFileRecords: RawFileRecord[] = [];
for (const fileRecord of fileRecords) {
rawFileRecords.push(fileRecord.toRaw());
}
return rawFileRecords;
}
public static readFile(fileRecord: FileRecord): Promise<FileRecord> {
return new Promise((resolve, reject) => {
if (!fileRecord.read) {
fileRecord.setUrl(null).then(
() => {
resolve(fileRecord);
},
(err) => {
// ignore error
resolve(fileRecord);
}
);
return;
}
utils.getDataURL(fileRecord.file).then((dataUrl) => {
fileRecord.setUrl(dataUrl).then(() => {
resolve(fileRecord);
}, reject);
}, reject);
});
}
public static readFiles(fileRecords: FileRecord[]): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const fileRecord of fileRecords) {
promises.push(FileRecord.readFile(fileRecord));
}
return Promise.all(promises);
}
public urlValue: null | string = null;
public urlResized: null | string = null;
public image: HTMLImageElement | {} = {};
public isPlayingAv: boolean = false;
public oldFileName: string | null = null;
public oldCustomName: string | null = null;
public upload: UploadData = { data: null, error: false };
public raw: RawFileRecord;
public progressInternal: number;
public accept?: string;
public dimensions: Dimensions;
public error: false | ErrorFlags;
public file: File;
public height: undefined | number | string;
public width: undefined | number | string;
public id: string;
public imageColor?: RGBA;
public lastKnownSrc: null | string;
public maxSize?: string;
public options: Options;
public read: boolean;
public thumbnailSize: number;
public videoThumbnail: any;
public customName: any;
public xhr?: XMLHttpRequest;
public xhrQueue?: () => any;
public stopAv?: (() => any) | null;
public tusUpload?: any;
public calculateAverageColor: boolean;
public constructor(data: RawFileRecord, options: Options) {
this.urlValue = null;
this.urlResized = null;
this.lastKnownSrc = null;
this.image = {};
this.isPlayingAv = false;
this.oldFileName = null;
this.oldCustomName = null;
this.raw = data;
this.file = data.file instanceof File ? data.file : (this.createDummyFile(data) as any);
this.progressInternal = !isNaN(data.progress as number) ? (data.progress as number) : 0;
// this.width = FileRecord.defaultWidth;
// this.height = FileRecord.defaultHeight;
this.thumbnailSize = options.thumbnailSize || 360;
this.read = !!options.read;
this.dimensions = data.dimensions || { width: 0, height: 0 };
this.dimensions.width = this.dimensions.width || 0;
this.dimensions.height = this.dimensions.height || 0;
this.error = data.error || false;
this.options = options;
this.maxSize = options.maxSize;
this.accept = options.accept;
this.id = Math.random() + ':' + new Date().getTime();
this.videoThumbnail = data.videoThumbnail;
this.imageColor = data.imageColor;
this.customName = data.customName;
this.calculateAverageColor = options.averageColor !== undefined ? options.averageColor : true;
this.validate();
}
// populate(data, options = {}) {}
public createDummyFile(data: RawFileRecord): DummyFile {
const file: DummyFile = {} as DummyFile;
file.lastModified = data.lastModified;
const d = new Date();
if (file.lastModified) {
d.setTime(file.lastModified);
}
file.lastModifiedDate = d;
file.name = typeof data.name === 'function' ? data.name() : data.name;
file.size = data.size;
file.type = data.type;
return file;
}
public hasProgress(): boolean {
return !isNaN(this.progressInternal); // && this._progress <= 100;
}
public progress(value?: number): number | void {
if (value !== undefined) {
this.progressInternal = value;
return;
}
return this.progressInternal || 0;
}
public url(value?: string): string | undefined | Promise<this> {
if (value !== undefined) {
return this.setUrl(value);
}
return this.urlValue || undefined;
}
public src(): string {
if (this.isImage()) {
return this.urlResized || this.urlValue || (this.file as any).url;
}
if (this.isPlayableVideo()) {
return this.videoThumbnail || '';
}
return '';
}
public size(): string {
if (!this.file) {
return '';
}
return utils.getSizeFormatted(this.file.size);
}
public ext(): string {
if (this.file && this.file.name.indexOf('.') !== -1) {
return (this.file.name as any).split('.').pop();
}
return '?';
// return this.file.type.split('/').shift();
}
public name(withoutExt?: boolean): string {
const ext = this.ext();
if (this.customName) {
return this.customName + (withoutExt ? '' : ext !== '?' ? '.' + ext : '');
}
const name = this.file && this.file.name;
if (withoutExt) {
if (ext !== '?') {
return name.substr(0, name.length - (ext.length + 1));
}
}
return name;
}
public isDarkColor(): boolean {
if (this.imageColor) {
const rgb = this.imageColor;
const darkPoint = 20;
return rgb[0] <= darkPoint && rgb[1] <= darkPoint && rgb[2] <= darkPoint;
}
return false;
}
public color(): string {
if (this.imageColor) {
const rgb = this.imageColor;
return 'rgb(' + rgb[0] + ', ' + rgb[1] + ', ' + rgb[2] + ')';
}
if (this.isImage()) {
return 'transparent';
}
const ext = this.ext();
const svgIcon = this.icon();
// var svgIcon = getIconFromExt(ext);
if (svgIcon.color) {
return svgIcon.color;
}
return utils.getColorForText(ext);
}
public isImage(): boolean {
return this.file && !!this.file.type.match(/image((?!vnd).)*$/i);
}
public isVideo(): boolean {
return this.file && this.file.type.indexOf('video') !== -1;
}
public isPlayableVideo(): boolean {
return this.icon().category === 'video-playable';
}
public isAudio(): boolean {
return this.file && this.file.type.indexOf('audio') !== -1;
}
public isPlayableAudio(): boolean {
return this.icon().category === 'audio-playable';
}
public isText(): boolean {
return this.file && this.file.type.indexOf('text') !== -1;
}
public setUrl(url: string | null): Promise<this> |
public imageResized(resized: ImageThumbnail | | {
this.urlValue = url;
return new Promise((resolve, reject) => {
if (this.isImage()) {
this.resizeImage().then(
() => {
resolve(this);
},
(err) => {
resolve(this);
}
);
return;
}
resolve(this);
});
} | identifier_body |
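Here the fim_type is identifier_body: the middle is the whole body of setUrl, brace to brace, and the prefix cell ends immediately after the method signature. A naive brace-counting extractor shows how such a span can be carved out of flat text; it deliberately ignores braces inside strings and comments, which a real pipeline would need a parser to handle:

// Return the {...} block starting at index `open` by tracking brace depth.
function extractBody(source: string, open: number): string {
  let depth = 0;
  for (let i = open; i < source.length; i++) {
    if (source[i] === '{') depth++;
    else if (source[i] === '}') {
      depth--;
      if (depth === 0) return source.slice(open, i + 1);
    }
  }
  throw new Error('unbalanced braces');
}

// Simplified example; the real setUrl body is the multi-line middle above.
const sig = 'public setUrl(url: string | null): Promise<this> ';
const body = '{ this.urlValue = url; return Promise.resolve(this); }';
console.log(extractBody(sig + body, sig.length) === body); // true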
file-record.ts | File {
name: string;
size: number;
type: string;
lastModified: number;
lastModifiedDate: Date;
}
interface UploadData {
data: any;
error: string | false;
}
export { Dimensions, Options, RawFileRecord };
class FileRecord {
public static getFromRaw(
rawFileRecord: RawFileRecord,
options: Options,
isSync = false
): FileRecord | Promise<FileRecord> {
const fileRecord = new FileRecord(rawFileRecord, options);
const promise = fileRecord.setUrl(rawFileRecord.url as string);
rawFileRecord.progress = fileRecord.progress.bind(fileRecord); // expose progress as a bound function
rawFileRecord.src = fileRecord.src.bind(fileRecord);
rawFileRecord.name = fileRecord.name.bind(fileRecord); // expose name as a bound function
if (isSync) {
return fileRecord;
}
return promise;
}
public static fromRaw(rawFileRecord: RawFileRecord, options: Options): Promise<FileRecord> {
return FileRecord.getFromRaw(rawFileRecord, options, false) as Promise<FileRecord>;
}
public static fromRawSync(rawFileRecord: RawFileRecord, options: Options): FileRecord {
return FileRecord.getFromRaw(rawFileRecord, options, true) as FileRecord;
}
public static fromRawArray(rawFileRecords: RawFileRecord[], options: Options): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const rawFileRecord of rawFileRecords) {
promises.push(FileRecord.fromRaw(rawFileRecord, options));
}
return Promise.all(promises);
}
public static toRawArray(fileRecords: FileRecord[]): RawFileRecord[] {
const rawFileRecords: RawFileRecord[] = [];
for (const fileRecord of fileRecords) {
rawFileRecords.push(fileRecord.toRaw());
}
return rawFileRecords;
}
public static readFile(fileRecord: FileRecord): Promise<FileRecord> {
return new Promise((resolve, reject) => {
if (!fileRecord.read) {
fileRecord.setUrl(null).then(
() => {
resolve(fileRecord);
},
(err) => {
// ignore error
resolve(fileRecord);
}
);
return;
}
utils.getDataURL(fileRecord.file).then((dataUrl) => {
fileRecord.setUrl(dataUrl).then(() => {
resolve(fileRecord);
}, reject);
}, reject);
});
}
public static readFiles(fileRecords: FileRecord[]): Promise<FileRecord[]> {
const promises: Array<Promise<FileRecord>> = [];
for (const fileRecord of fileRecords) {
promises.push(FileRecord.readFile(fileRecord));
}
return Promise.all(promises);
}
public urlValue: null | string = null;
public urlResized: null | string = null;
public image: HTMLImageElement | {} = {};
public isPlayingAv: boolean = false;
public oldFileName: string | null = null;
public oldCustomName: string | null = null;
public upload: UploadData = { data: null, error: false };
public raw: RawFileRecord;
public progressInternal: number;
public accept?: string;
public dimensions: Dimensions;
public error: false | ErrorFlags;
public file: File;
public height: undefined | number | string;
public width: undefined | number | string;
public id: string;
public imageColor?: RGBA;
public lastKnownSrc: null | string;
public maxSize?: string;
public options: Options;
public read: boolean;
public thumbnailSize: number;
public videoThumbnail: any;
public customName: any;
public xhr?: XMLHttpRequest;
public xhrQueue?: () => any;
public stopAv?: (() => any) | null;
public tusUpload?: any;
public calculateAverageColor: boolean;
public constructor(data: RawFileRecord, options: Options) {
this.urlValue = null;
this.urlResized = null;
this.lastKnownSrc = null;
this.image = {};
this.isPlayingAv = false;
this.oldFileName = null;
this.oldCustomName = null;
this.raw = data;
this.file = data.file instanceof File ? data.file : (this.createDummyFile(data) as any);
this.progressInternal = !isNaN(data.progress as number) ? (data.progress as number) : 0;
// this.width = FileRecord.defaultWidth;
// this.height = FileRecord.defaultHeight;
this.thumbnailSize = options.thumbnailSize || 360;
this.read = !!options.read;
this.dimensions = data.dimensions || { width: 0, height: 0 };
this.dimensions.width = this.dimensions.width || 0;
this.dimensions.height = this.dimensions.height || 0;
this.error = data.error || false;
this.options = options;
this.maxSize = options.maxSize;
this.accept = options.accept;
this.id = Math.random() + ':' + new Date().getTime();
this.videoThumbnail = data.videoThumbnail;
this.imageColor = data.imageColor;
this.customName = data.customName;
this.calculateAverageColor = options.averageColor !== undefined ? options.averageColor : true;
this.validate();
}
// populate(data, options = {}) {}
public createDummyFile(data: RawFileRecord): DummyFile {
const file: DummyFile = {} as DummyFile;
file.lastModified = data.lastModified;
const d = new Date();
if (file.lastModified) {
d.setTime(file.lastModified);
}
file.lastModifiedDate = d;
file.name = typeof data.name === 'function' ? data.name() : data.name;
file.size = data.size;
file.type = data.type;
return file;
}
public hasProgress(): boolean {
return !isNaN(this.progressInternal); // && this._progress <= 100;
}
public progress(value?: number): number | void {
if (value !== undefined) |
return this.progressInternal || 0;
}
public url(value?: string): string | undefined | Promise<this> {
if (value !== undefined) {
return this.setUrl(value);
}
return this.urlValue || undefined;
}
public src(): string {
if (this.isImage()) {
return this.urlResized || this.urlValue || (this.file as any).url;
}
if (this.isPlayableVideo()) {
return this.videoThumbnail || '';
}
return '';
}
public size(): string {
if (!this.file) {
return '';
}
return utils.getSizeFormatted(this.file.size);
}
public ext(): string {
if (this.file && this.file.name.indexOf('.') !== -1) {
return (this.file.name as any).split('.').pop();
}
return '?';
// return this.file.type.split('/').shift();
}
public name(withoutExt?: boolean): string {
const ext = this.ext();
if (this.customName) {
return this.customName + (withoutExt ? '' : ext !== '?' ? '.' + ext : '');
}
const name = this.file && this.file.name;
if (withoutExt) {
if (ext !== '?') {
return name.substr(0, name.length - (ext.length + 1));
}
}
return name;
}
public isDarkColor(): boolean {
if (this.imageColor) {
const rgb = this.imageColor;
const darkPoint = 20;
return rgb[0] <= darkPoint && rgb[1] <= darkPoint && rgb[2] <= darkPoint;
}
return false;
}
public color(): string {
if (this.imageColor) {
const rgb = this.imageColor;
return 'rgb(' + rgb[0] + ', ' + rgb[1] + ', ' + rgb[2] + ')';
}
if (this.isImage()) {
return 'transparent';
}
const ext = this.ext();
const svgIcon = this.icon();
// var svgIcon = getIconFromExt(ext);
if (svgIcon.color) {
return svgIcon.color;
}
return utils.getColorForText(ext);
}
public isImage(): boolean {
return this.file && !!this.file.type.match(/image((?!vnd).)*$/i);
}
public isVideo(): boolean {
return this.file && this.file.type.indexOf('video') !== -1;
}
public isPlayableVideo(): boolean {
return this.icon().category === 'video-playable';
}
public isAudio(): boolean {
return this.file && this.file.type.indexOf('audio') !== -1;
}
public isPlayableAudio(): boolean {
return this.icon().category === 'audio-playable';
}
public isText(): boolean {
return this.file && this.file.type.indexOf('text') !== -1;
}
public setUrl(url: string | null): Promise<this> {
this.urlValue = url;
return new Promise((resolve, reject) => {
if (this.isImage()) {
this.resizeImage().then(
() => {
resolve(this);
},
(err) => {
resolve(this);
}
);
return;
}
resolve(this);
});
}
public imageResized(resized: ImageThumbnail | | {
this.progressInternal = value;
return;
} | conditional_block
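The fourth file-record.ts row is a conditional_block sample: its prefix stops after "if (value !== undefined) " inside progress, and the middle is that branch's body, { this.progressInternal = value; return; }. Locating such a target can be sketched with a regex plus brace matching; an AST pass would be the robust choice:

// Split a source string around the body of its first if-statement.
function splitAtConditional(
  source: string,
): { prefix: string; middle: string; suffix: string } | null {
  const m = /if\s*\([^)]*\)\s*/.exec(source);
  if (!m) return null;
  const open = m.index + m[0].length;
  if (source[open] !== '{') return null;
  let depth = 0;
  let end = -1;
  for (let i = open; i < source.length; i++) {
    if (source[i] === '{') depth++;
    else if (source[i] === '}' && --depth === 0) { end = i; break; }
  }
  if (end < 0) return null;
  return {
    prefix: source.slice(0, open),
    middle: source.slice(open, end + 1),
    suffix: source.slice(end + 1),
  };
}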
script.js | Def.filter.maskBits = 9;*/
break;
case "polygon":
this.fixtureDef.shape = new b2PolygonShape();
this.fixtureDef.shape.SetAsArray(details.points,details.points.length);
break;
case "block":
default:
details.width = details.width || this.defaults.width;
details.height = details.height || this.defaults.height;
this.fixtureDef.shape = new b2PolygonShape();
this.fixtureDef.shape.SetAsBox(details.width/2, details.height/2);
if(details.sensor) this.fixtureDef.isSensor = true;
break;
}
this.body.CreateFixture(this.fixtureDef);
};
Body.prototype.defaults = {
shape: "block",
width: 4,
height: 4,
radius: 1
};
Body.prototype.fixtureDefaults = {
density: 2,
friction: 1,
restitution: 0.2
};
Body.prototype.definitionDefaults = {
active: true,
allowSleep: true,
angle: 0,
angularVelocity: 0,
awake: true,
bullet: false,
fixedRotation: false
};
Body.prototype.draw = function(context) {
var pos = this.body.GetPosition(),
angle = this.body.GetAngle();
context.save();
context.translate(pos.x,pos.y);
context.rotate(angle);
if(this.details.color) {
context.fillStyle = this.details.color;
switch(this.details.shape) {
case "circle":
context.beginPath();
context.arc(0,0,this.details.radius,0,Math.PI*2);
context.fill();
break;
case "circle2":
context.beginPath();
context.arc(0,0,this.details.radius,0,Math.PI*2);
context.fill();
break;
case "polygon":
var points = this.details.points;
context.beginPath();
context.moveTo(points[0].x,points[0].y);
for(var i=1;i<points.length;i++) {
context.lineTo(points[i].x,points[i].y);
}
context.fill();
break;
case "block":
context.fillRect(-this.details.width/2,
-this.details.height/2,
this.details.width,
this.details.height);
default:
break;
}
}
if(this.details.image) {
context.drawImage(this.details.image,
-this.details.width/2,
-this.details.height/2,
this.details.width,
this.details.height);
}
context.restore();
}
window.gameLoop = function() {
var tm = new Date().getTime();
requestAnimationFrame(gameLoop);
var dt = (tm - lastFrame) / 1000;
if(dt > 1/15) { dt = 1/15; }
physics.step(dt);
lastFrame = tm;
player.scores.mt = player.scores.mt+dt;
scoreMtObj.innerText = Math.round(player.scores.mt*10);
};
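// Note: capping dt at 1/15 s above keeps a long stall (tab switch, GC pause)
// from feeding a huge step into the physics world and tunneling bodies
// through walls. The clamp in isolation, illustrative only:
// function clampedDelta(nowMs, lastMs) {
//   var dt = (nowMs - lastMs) / 1000;
//   return dt > 1 / 15 ? 1 / 15 : dt;
// }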
function createWorld() {
physics = window.physics = new Physics(cvObj);
physics.collision();
var inner_width = physics.element.width / physics.scale;
var inner_height = physics.element.height / physics.scale;
setPillarsAndWalls(physics);
setCoins(physics);
player.obj = new Body(physics, {shape: 'circle', image:player.balls[player.hits], x: 5, y: 20, width: 2, height:2, radius:1, userData:{name:'player'} }).body;
/* setInterval(function(){
//btnActions.keyActions();
var im = {x : 10.0, y : 1.0}
player.obj.ApplyImpulse(im, player.obj.GetPosition());
}, 100); */
/*Event Bindings*/
//window.addEventListener("keydown", btnActions.keyActions, false);
for(var i=0; i<btnObj.length; i++){
btnObj[i].addEventListener("click", function(e){
switch(this.getAttribute('data-action')){
case 'resume':
btnActions.pauseOrResume();
break;
case 'start':
menuObj.style.display = 'none';
tipObj.style.display = 'block';
btnActions.pauseOrResume(true);
/*Event Bindings*/
window.addEventListener("keydown", btnActions.keyActions, false);
break;
}
});
}
}
btnActions = {
speeder:0,
keyActions: function(e){
if(e && e.which == 27){
btnActions.pauseOrResume();
return false;
}
if(is_started && physics.getPlayStatus()) return false;
if(player.hits == player.life){
return false;
}
if(e && !is_started){
tipObj.style.display = 'none';
physics.resume();
is_started = true;
}
var vel = player.obj.GetLinearVelocity();
vel.x = (player.hits) ? 10 - (player.hits*2) : 10;
btnActions.speeder = btnActions.speeder+0.2;
vel.x = vel.x+btnActions.speeder++;
vel.y = -10;
player.obj.SetLinearVelocity(vel);
var im = {x : 24.0, y : 0.0}
player.obj.ApplyImpulse(im, player.obj.GetPosition());
//console.log(player.obj)
},
pauseOrResume: function(is_pause){
if(!physics.getPlayStatus()){
physics.pause();
pauseObj.style.display = 'block';
}else{
physics.resume();
pauseObj.style.display = 'none';
}
if(is_pause){
physics.pause();
return false;
}
}
}
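// The flap rule in keyActions above: horizontal speed starts at 10, loses 2
// per accumulated pillar hit, gains the growing `speeder` bonus, and vel.y is
// pinned to -10 for the upward kick. Restated as a pure function (not wired
// into the game, shown for clarity only):
// function flapVelocity(hits, speeder) {
//   var base = hits ? 10 - hits * 2 : 10;
//   return { x: base + speeder, y: -10 };
// }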
function setPillarsAndWalls(physics){
var inner_height = physics.element.height/physics.scale;
var wt = 4, ht = inner_height/1.4, orig_ht = ht; // declare ht explicitly so it does not leak as an implicit global
var bool = 1;
var x, y, pad = 1;
var arr = [];
for (var i = -6 ; i <= ht-10; i++) {
arr.push(i);
}
//arr[Math.floor(Math.random()*arr.length)];
for (var i = 5; i < 3000; i++){
ht = ht - arr[Math.floor(Math.random()*arr.length)];
//console.log('ov: '+ht)
if(ht > orig_ht){
ht = orig_ht;
}else if(ht <= 15){
ht = 15;
}
//console.log('nv: '+ht)
if(bool){
x = (wt+pad)*(i);
y = 0;
bool = 0;
}else{
x = (wt+pad)*(i-1);
y = inner_height; //25
bool = 1;
}
beams.obj.push(new Body(physics, { image: beams.img, type: "static", x: x, y: y, height: ht, width: wt, userData:{name:'pillar'}, sensor: false }));
}
var beamWidth = beams.obj[beams.obj.length-1].details.x+8;
// Create some walls
walls.left = new Body(physics, { color: "rgb(93, 198, 250)", type: "static", x: 0, y: 0, height: physics.element.height, width: 0.5, userData:{name:'wall'} });
walls.right = new Body(physics, { color: "red", type: "static", x: beamWidth, y: 0, height: physics.element.height, width: 0.5, userData:{name:'wall'}});
walls.top = new Body(physics, { color: "rgb(93, 198, 250)", type: "static", x: 0, y: 0, height: 0.5, width: beamWidth, userData:{name:'wall'} });
walls.bottom = new Body(physics, { color: "rgb(72, 76, 77)", type: "static", x: beamWidth/2, y:inner_height, height: 0.5, width: beamWidth, userData:{name:'wall'} });
}
function setCoins(physics) | {
var x, y;
var counter = 0;
// 100 iterations
var increase = Math.PI * 2 / 100;
for (var i = 25; i <= 15000; i +=6 ) {
x = i;
y = Math.sin(counter) / 2 + 18;
counter += increase * i;
var coin = new Body(physics, { shape: 'circle2', image: coins.img[Math.floor(Math.random()*coins.img.length)], type: "static", x: x, y: y, height: 1.4, width: 1.4, radius:1.4/2, userData:{name:'coins', value:1, i:i}});
coins.obj.push(coin);
}
} | identifier_body |
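This script.js row is another identifier_body sample, with the body of setCoins as its middle. That body lays coins along a sine wave: y = Math.sin(counter) / 2 + 18 keeps every coin within 17.5 to 18.5 world units while x advances in steps of 6, and counter grows by increase * i rather than a fixed step, so the wave's phase accelerates down the course. The placement logic in isolation, with the Body/physics plumbing stubbed out:

// Coin positions only; Body construction and rendering are out of scope.
function coinPositions(): Array<{ x: number; y: number }> {
  const out: Array<{ x: number; y: number }> = [];
  const increase = (Math.PI * 2) / 100;
  let counter = 0;
  for (let i = 25; i <= 15000; i += 6) {
    out.push({ x: i, y: Math.sin(counter) / 2 + 18 });
    counter += increase * i; // grows with i, exactly as in the source
  }
  return out;
}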
|
script.js | obj.GetNext();
}
this.context.restore();
}
};
Physics.prototype.click = function(callback) {
var self = this;
function handleClick(e) {
e.preventDefault();
var point = {
x: (e.offsetX || e.layerX) / self.scale,
y: (e.offsetY || e.layerY) / self.scale
};
self.world.QueryPoint(function(fixture) {
callback(fixture.GetBody(), fixture, point);
}, point);
}
};
Physics.prototype.collision = function() {
this.listener = new Box2D.Dynamics.b2ContactListener();
this.listener.BeginContact = function(contact,impulse) {
if(physics.getGaveOver()) return false;
if(contact.GetFixtureB().GetBody().GetUserData().details.userData.name == 'coins'){
destroyObj.push(contact.GetFixtureB().GetBody());
var x = player.obj.GetUserData();
x.details.image = player.openBall;
player.obj.SetUserData(x);
player.scores.pt++;
scorePtObj.innerText = player.scores.pt;
//if(player.scores.pt >= )
}
if(contact.GetFixtureA().GetBody().GetUserData().details.userData.name == 'pillar' || contact.GetFixtureA().GetBody().GetUserData().details.userData.name == 'wall'){
//return false;
if(physics.getGaveOver()) return false;
if(contact.GetFixtureA().GetBody().GetUserData().details.userData.name == 'pillar')
player.hits++;
else
player.hits = player.life; //set game over
btnActions.speeder = 0;
var x = player.obj.GetUserData();
x.details.image = player.balls[player.hits]; //openBall
x.fixtureDef.density = 1;
x.fixtureDef.restitution = 1;
x.fixtureDef.friction = 4;
player.obj.SetUserData(x);
showHiteffect();
if(player.hits >= player.life){
physics.setGaveOver();
window.removeEventListener("keydown", btnActions.keyActions, false);
overObj.style.display = 'block';
var collectedObj = overObj.querySelector('.collected');
collectedObj.innerHTML = 'Fruits Collected: <span>'+player.scores.pt+'</span>';
collectedObj.style.display = 'block';
return false;
}
}
}
this.listener.EndContact = function (contact) {
if(contact.GetFixtureB().GetBody().GetUserData().details.userData.name == 'coins'){
//physics.world.DestroyBody(contact.GetFixtureB().GetBody());
setTimeout(function(){
var x = player.obj.GetUserData();
x.details.image = player.balls[player.hits];
player.obj.SetUserData(x);
}, 200);
}
};
this.listener.PreSolve = function (contact, oldManifold) {
//console.log('hit')
};
this.listener.PostSolve = function(contact,impulse) {
var bodyA = contact.GetFixtureA().GetBody().GetUserData(),
bodyB = contact.GetFixtureB().GetBody().GetUserData();
if(bodyA.contact) { bodyA.contact(contact,impulse,true) }
if(bodyB.contact) { bodyB.contact(contact,impulse,false) }
//console.log('XXXXXX');console.log(contact);console.log(bodyA);console.log(bodyB);console.log('XXXXXX');
};
this.world.SetContactListener(this.listener);
};
Physics.prototype.resume = function() {
this.isPause = false;
}
Physics.prototype.pause = function() {
this.isPause = true;
}
| this.gaveOver = true;
}
Physics.prototype.getGaveOver = function() {
return this.gaveOver;
}
Physics.prototype.getPlayStatus = function() {
return this.isPause;
}
var Body = window.Body = function(physics,details) {
this.details = details = details || {};
// Create the definition
this.definition = new b2BodyDef();
// Set up the definition
for(var k in this.definitionDefaults) {
this.definition[k] = details[k] || this.definitionDefaults[k];
}
this.definition.position = new b2Vec2(details.x || 0, details.y || 0);
this.definition.linearVelocity = new b2Vec2(details.vx || 0, details.vy || 0);
this.definition.userData = this;
this.definition.type = details.type == "static" ? b2Body.b2_staticBody : b2Body.b2_dynamicBody;
// Create the Body
this.body = physics.world.CreateBody(this.definition);
// Create the fixture
this.fixtureDef = new b2FixtureDef();
for(var l in this.fixtureDefaults) {
this.fixtureDef[l] = details[l] || this.fixtureDefaults[l];
}
details.shape = details.shape || this.defaults.shape;
switch(details.shape) {
case "circle":
details.radius = details.radius || this.defaults.radius;
this.fixtureDef.shape = new b2CircleShape(details.radius);
break;
case "circle2":
details.radius = details.radius || this.defaults.radius;
this.fixtureDef.shape = new b2CircleShape(details.radius);
this.fixtureDef.isSensor = true;
/*coin.fixtureDef.friction = 0;
coin.fixtureDef.density = 0;
coin.fixtureDef.restitution = 0;
coin.fixtureDef.filter.categoryBits = 4;
coin.fixtureDef.filter.maskBits = 9;*/
break;
case "polygon":
this.fixtureDef.shape = new b2PolygonShape();
this.fixtureDef.shape.SetAsArray(details.points,details.points.length);
break;
case "block":
default:
details.width = details.width || this.defaults.width;
details.height = details.height || this.defaults.height;
this.fixtureDef.shape = new b2PolygonShape();
this.fixtureDef.shape.SetAsBox(details.width/2, details.height/2);
if(details.sensor) this.fixtureDef.isSensor = true;
break;
}
this.body.CreateFixture(this.fixtureDef);
};
Body.prototype.defaults = {
shape: "block",
width: 4,
height: 4,
radius: 1
};
Body.prototype.fixtureDefaults = {
density: 2,
friction: 1,
restitution: 0.2
};
Body.prototype.definitionDefaults = {
active: true,
allowSleep: true,
angle: 0,
angularVelocity: 0,
awake: true,
bullet: false,
fixedRotation: false
};
Body.prototype.draw = function(context) {
var pos = this.body.GetPosition(),
angle = this.body.GetAngle();
context.save();
context.translate(pos.x,pos.y);
context.rotate(angle);
if(this.details.color) {
context.fillStyle = this.details.color;
switch(this.details.shape) {
case "circle":
context.beginPath();
context.arc(0,0,this.details.radius,0,Math.PI*2);
context.fill();
break;
case "circle2":
context.beginPath();
context.arc(0,0,this.details.radius,0,Math.PI*2);
context.fill();
break;
case "polygon":
var points = this.details.points;
context.beginPath();
context.moveTo(points[0].x,points[0].y);
for(var i=1;i<points.length;i++) {
context.lineTo(points[i].x,points[i].y);
}
context.fill();
break;
case "block":
context.fillRect(-this.details.width/2,
-this.details.height/2,
this.details.width,
this.details.height);
default:
break;
}
}
if(this.details.image) {
context.drawImage(this.details.image,
-this.details.width/2,
-this.details.height/2,
this.details.width,
this.details.height);
}
context.restore();
}
window.gameLoop = function() {
var tm = new Date().getTime();
requestAnimationFrame(gameLoop);
var dt = (tm - lastFrame) / 1000;
if(dt > 1/15) { dt = 1/15; }
physics.step(dt);
lastFrame = tm;
player.scores.mt = player.scores.mt+dt;
scoreMtObj.innerText = Math.round(player.scores.mt*10);
};
function createWorld() {
physics = window.physics = new Physics(cvObj);
physics.collision();
var inner_width = physics.element.width / physics.scale;
var inner_height = physics.element.height / physics.scale;
setPillarsAndWalls(physics);
setCoins(physics);
player.obj = new Body(physics, {shape: 'circle', image:player.balls[player.hits], x: 5, y: 20, width: 2, height:2, radius:1, userData:{name:'player'} }).body;
/* setInterval(function(){
//btnActions.keyActions();
var im = {x : 10.0, y : 1.0}
player.obj.ApplyImpulse(im, player.obj.GetPosition());
}, | Physics.prototype.setGaveOver = function() { | random_line_split |
script.js | .gameLoop = function() {
var tm = new Date().getTime();
requestAnimationFrame(gameLoop);
var dt = (tm - lastFrame) / 1000;
if(dt > 1/15) { dt = 1/15; }
physics.step(dt);
lastFrame = tm;
player.scores.mt = player.scores.mt+dt;
scoreMtObj.innerText = Math.round(player.scores.mt*10);
};
function createWorld() {
physics = window.physics = new Physics(cvObj);
physics.collision();
var inner_width = physics.element.width / physics.scale;
var inner_height = physics.element.height / physics.scale;
setPillarsAndWalls(physics);
setCoins(physics);
player.obj = new Body(physics, {shape: 'circle', image:player.balls[player.hits], x: 5, y: 20, width: 2, height:2, radius:1, userData:{name:'player'} }).body;
/* setInterval(function(){
//btnActions.keyActions();
var im = {x : 10.0, y : 1.0}
player.obj.ApplyImpulse(im, player.obj.GetPosition());
}, 100); */
/*Event Bindings*/
//window.addEventListener("keydown", btnActions.keyActions, false);
for(var i=0; i<btnObj.length; i++){
btnObj[i].addEventListener("click", function(e){
switch(this.getAttribute('data-action')){
case 'resume':
btnActions.pauseOrResume();
break;
case 'start':
menuObj.style.display = 'none';
tipObj.style.display = 'block';
btnActions.pauseOrResume(true);
/*Event Bindings*/
window.addEventListener("keydown", btnActions.keyActions, false);
break;
}
});
}
}
btnActions = {
speeder:0,
keyActions: function(e){
if(e && e.which == 27){
btnActions.pauseOrResume();
return false;
}
if(is_started && physics.getPlayStatus()) return false;
if(player.hits == player.life){
return false;
}
if(e && !is_started){
tipObj.style.display = 'none';
physics.resume();
is_started = true;
}
var vel = player.obj.GetLinearVelocity();
vel.x = (player.hits) ? 10 - (player.hits*2) : 10;
btnActions.speeder = btnActions.speeder+0.2;
vel.x = vel.x+btnActions.speeder++;
vel.y = -10;
player.obj.SetLinearVelocity(vel);
var im = {x : 24.0, y : 0.0}
player.obj.ApplyImpulse(im, player.obj.GetPosition());
//console.log(player.obj)
},
pauseOrResume: function(is_pause){
if(!physics.getPlayStatus()){
physics.pause();
pauseObj.style.display = 'block';
}else{
physics.resume();
pauseObj.style.display = 'none';
}
if(is_pause){
physics.pause();
return false;
}
}
}
function setPillarsAndWalls(physics){
var inner_height = physics.element.height/physics.scale;
var wt = 4, ht = inner_height/1.4, orig_ht = ht; // declare ht explicitly so it does not leak as an implicit global
var bool = 1;
var x, y, pad = 1;
var arr = [];
for (var i = -6 ; i <= ht-10; i++) {
arr.push(i);
}
//arr[Math.floor(Math.random()*arr.length)];
for (var i = 5; i < 3000; i++){
ht = ht - arr[Math.floor(Math.random()*arr.length)];
//console.log('ov: '+ht)
if(ht > orig_ht){
ht = orig_ht;
}else if(ht <= 15){
ht = 15;
}
//console.log('nv: '+ht)
if(bool){
x = (wt+pad)*(i);
y = 0;
bool = 0;
}else{
x = (wt+pad)*(i-1);
y = inner_height; //25
bool = 1;
}
beams.obj.push(new Body(physics, { image: beams.img, type: "static", x: x, y: y, height: ht, width: wt, userData:{name:'pillar'}, sensor: false }));
}
var beamWidth = beams.obj[beams.obj.length-1].details.x+8;
// Create some walls
walls.left = new Body(physics, { color: "rgb(93, 198, 250)", type: "static", x: 0, y: 0, height: physics.element.height, width: 0.5, userData:{name:'wall'} });
walls.right = new Body(physics, { color: "red", type: "static", x: beamWidth, y: 0, height: physics.element.height, width: 0.5, userData:{name:'wall'}});
walls.top = new Body(physics, { color: "rgb(93, 198, 250)", type: "static", x: 0, y: 0, height: 0.5, width: beamWidth, userData:{name:'wall'} });
walls.bottom = new Body(physics, { color: "rgb(72, 76, 77)", type: "static", x: beamWidth/2, y:inner_height, height: 0.5, width: beamWidth, userData:{name:'wall'} });
}
function setCoins(physics){
var x, y;
var counter = 0;
// 100 iterations
var increase = Math.PI * 2 / 100;
for (var i = 25; i <= 15000; i +=6 ) {
x = i;
y = Math.sin(counter) / 2 + 18;
counter += increase * i;
var coin = new Body(physics, { shape: 'circle2', image: coins.img[Math.floor(Math.random()*coins.img.length)], type: "static", x: x, y: y, height: 1.4, width: 1.4, radius:1.4/2, userData:{name:'coins', value:1, i:i}});
coins.obj.push(coin);
}
}
function init() {
var preloader = [];
var imgArray = ['images/flappy-pacman-logo.png', 'images/log.png', 'images/smily-40.png', 'images/smily-40-1.png', 'images/smily-40-2.png', 'images/smily-40-3.png', 'images/coins/c1.png', 'images/coins/c2.png', 'images/coins/c3.png', 'images/coins/c4.png', 'images/coins/c5.png', 'images/coins/c6.png', 'images/coins/c7.png', 'images/coins/c8.png', 'images/coins/c9.png', 'images/coins/c10.png', 'images/coins/c11.png', 'images/coins/c12.png', 'images/coins/c13.png', 'images/coins/c14.png', 'images/coins/c15.png', 'images/coins/c16.png', 'images/coins/c17.png', 'images/coins/c18.png', 'images/coins/c19.png', 'images/coins/c20.png', 'images/coins/c21.png', 'images/bang-hit.png'];
var $i = 0;
function loadImg(){
preloader[$i] = new Image();
preloader[$i].src = imgArray[$i];
preloader[$i].onload = function(){
$i++;
if($i == imgArray.length){
for(var i = 0; i<=21-1; i++){
coins.img[i] = new Image();
coins.img[i].src = 'images/coins/c'+(i+1)+'.png';
}
beams.img.src = 'images/log.png';
player.balls[0].src = 'images/smily-40.png';
player.balls[1].src = 'images/smily-40-1.png';
player.balls[2].src = 'images/smily-40-2.png';
player.balls[3].src = 'images/smily-40-3.png';
player.openBall.src = 'images/smily-40-eat.png';
createWorld();
requestAnimationFrame(gameLoop);
setTimeout(function(){physics.pause();}, 1000);
loaderObj.style.display = 'none';
menuObj.style.display = 'block';
return true;
}
loadImg();
}
}
loadImg();
}
window.addEventListener("load",init);
}());
function | showHiteffect | identifier_name |
|
script.js | obj.GetNext();
}
this.context.restore();
}
};
Physics.prototype.click = function(callback) {
var self = this;
function handleClick(e) {
e.preventDefault();
var point = {
x: (e.offsetX || e.layerX) / self.scale,
y: (e.offsetY || e.layerY) / self.scale
};
self.world.QueryPoint(function(fixture) {
callback(fixture.GetBody(), fixture, point);
}, point);
}
};
Physics.prototype.collision = function() {
this.listener = new Box2D.Dynamics.b2ContactListener();
this.listener.BeginContact = function(contact,impulse) {
if(physics.getGaveOver()) return false;
if(contact.GetFixtureB().GetBody().GetUserData().details.userData.name == 'coins'){
destroyObj.push(contact.GetFixtureB().GetBody());
var x = player.obj.GetUserData();
x.details.image = player.openBall;
player.obj.SetUserData(x);
player.scores.pt++;
scorePtObj.innerText = player.scores.pt;
//if(player.scores.pt >= )
}
if(contact.GetFixtureA().GetBody().GetUserData().details.userData.name == 'pillar' || contact.GetFixtureA().GetBody().GetUserData().details.userData.name == 'wall'){
//return false;
if(physics.getGaveOver()) return false;
if(contact.GetFixtureA().GetBody().GetUserData().details.userData.name == 'pillar')
player.hits++;
else
player.hits = player.life; //set game over
btnActions.speeder = 0;
var x = player.obj.GetUserData();
x.details.image = player.balls[player.hits]; //openBall
x.fixtureDef.density = 1;
x.fixtureDef.restitution = 1;
x.fixtureDef.friction = 4;
player.obj.SetUserData(x);
showHiteffect();
if(player.hits >= player.life){
physics.setGaveOver();
window.removeEventListener("keydown", btnActions.keyActions, false);
overObj.style.display = 'block';
var collectedObj = overObj.querySelector('.collected');
collectedObj.innerHTML = 'Fruits Collected: <span>'+player.scores.pt+'</span>';
collectedObj.style.display = 'block';
return false;
}
}
}
this.listener.EndContact = function (contact) {
if(contact.GetFixtureB().GetBody().GetUserData().details.userData.name == 'coins'){
//physics.world.DestroyBody(contact.GetFixtureB().GetBody());
setTimeout(function(){
var x = player.obj.GetUserData();
x.details.image = player.balls[player.hits];
player.obj.SetUserData(x);
}, 200);
}
};
this.listener.PreSolve = function (contact, oldManifold) {
//console.log('hit')
};
this.listener.PostSolve = function(contact,impulse) {
var bodyA = contact.GetFixtureA().GetBody().GetUserData(),
bodyB = contact.GetFixtureB().GetBody().GetUserData();
if(bodyA.contact) { bodyA.contact(contact,impulse,true) }
if(bodyB.contact) { bodyB.contact(contact,impulse,false) }
//console.log('XXXXXX');console.log(contact);console.log(bodyA);console.log(bodyB);console.log('XXXXXX');
};
this.world.SetContactListener(this.listener);
};
Physics.prototype.resume = function() {
this.isPause = false;
}
Physics.prototype.pause = function() {
this.isPause = true;
}
Physics.prototype.setGaveOver = function() {
this.gaveOver = true;
}
Physics.prototype.getGaveOver = function() {
return this.gaveOver;
}
Physics.prototype.getPlayStatus = function() {
return this.isPause;
}
var Body = window.Body = function(physics,details) {
this.details = details = details || {};
// Create the definition
this.definition = new b2BodyDef();
// Set up the definition
for(var k in this.definitionDefaults) {
this.definition[k] = details[k] || this.definitionDefaults[k];
}
this.definition.position = new b2Vec2(details.x || 0, details.y || 0);
this.definition.linearVelocity = new b2Vec2(details.vx || 0, details.vy || 0);
this.definition.userData = this;
this.definition.type = details.type == "static" ? b2Body.b2_staticBody : b2Body.b2_dynamicBody;
// Create the Body
this.body = physics.world.CreateBody(this.definition);
// Create the fixture
this.fixtureDef = new b2FixtureDef();
for(var l in this.fixtureDefaults) {
this.fixtureDef[l] = details[l] || this.fixtureDefaults[l];
}
details.shape = details.shape || this.defaults.shape;
switch(details.shape) {
case "circle":
details.radius = details.radius || this.defaults.radius;
this.fixtureDef.shape = new b2CircleShape(details.radius);
break;
case "circle2":
details.radius = details.radius || this.defaults.radius;
this.fixtureDef.shape = new b2CircleShape(details.radius);
this.fixtureDef.isSensor = true;
/*coin.fixtureDef.friction = 0;
coin.fixtureDef.density = 0;
coin.fixtureDef.restitution = 0;
coin.fixtureDef.filter.categoryBits = 4;
coin.fixtureDef.filter.maskBits = 9;*/
break;
case "polygon":
this.fixtureDef.shape = new b2PolygonShape();
this.fixtureDef.shape.SetAsArray(details.points,details.points.length);
break;
case "block":
default:
details.width = details.width || this.defaults.width;
details.height = details.height || this.defaults.height;
this.fixtureDef.shape = new b2PolygonShape();
this.fixtureDef.shape.SetAsBox(details.width/2, details.height/2);
if(details.sensor) this.fixtureDef.isSensor = true;
break;
}
this.body.CreateFixture(this.fixtureDef);
};
Body.prototype.defaults = {
shape: "block",
width: 4,
height: 4,
radius: 1
};
Body.prototype.fixtureDefaults = {
density: 2,
friction: 1,
restitution: 0.2
};
Body.prototype.definitionDefaults = {
active: true,
allowSleep: true,
angle: 0,
angularVelocity: 0,
awake: true,
bullet: false,
fixedRotation: false
};
Body.prototype.draw = function(context) {
var pos = this.body.GetPosition(),
angle = this.body.GetAngle();
context.save();
context.translate(pos.x,pos.y);
context.rotate(angle);
if(this.details.color) {
context.fillStyle = this.details.color;
switch(this.details.shape) {
case "circle":
context.beginPath();
context.arc(0,0,this.details.radius,0,Math.PI*2);
context.fill();
break;
case "circle2":
context.beginPath();
context.arc(0,0,this.details.radius,0,Math.PI*2);
context.fill();
break;
case "polygon":
var points = this.details.points;
context.beginPath();
context.moveTo(points[0].x,points[0].y);
for(var i=1;i<points.length;i++) {
context.lineTo(points[i].x,points[i].y);
}
context.fill();
break;
case "block":
context.fillRect(-this.details.width/2,
-this.details.height/2,
this.details.width,
this.details.height);
default:
break;
}
}
if(this.details.image) |
context.restore();
}
window.gameLoop = function() {
var tm = new Date().getTime();
requestAnimationFrame(gameLoop);
var dt = (tm - lastFrame) / 1000;
if(dt > 1/15) { dt = 1/15; }
physics.step(dt);
lastFrame = tm;
player.scores.mt = player.scores.mt+dt;
scoreMtObj.innerText = Math.round(player.scores.mt*10);
};
function createWorld() {
physics = window.physics = new Physics(cvObj);
physics.collision();
var inner_width = physics.element.width / physics.scale;
var inner_height = physics.element.height / physics.scale;
setPillarsAndWalls(physics);
setCoins(physics);
player.obj = new Body(physics, {shape: 'circle', image:player.balls[player.hits], x: 5, y: 20, width: 2, height:2, radius:1, userData:{name:'player'} }).body;
/* setInterval(function(){
//btnActions.keyActions();
var im = {x : 10.0, y : 1.0}
player.obj.ApplyImpulse(im, player.obj.GetPosition());
| {
context.drawImage(this.details.image,
-this.details.width/2,
-this.details.height/2,
this.details.width,
this.details.height);
} | conditional_block
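This row's conditional_block middle is the image branch of Body.prototype.draw. The surrounding save/translate/rotate/restore sequence is the standard canvas idiom for drawing a rotated, center-anchored sprite; restated compactly below, with ctx and the size arguments assumed:

// Draw an image centered at (x, y), rotated by `angle` radians.
function drawRotated(
  ctx: CanvasRenderingContext2D,
  img: CanvasImageSource,
  x: number, y: number, angle: number, w: number, h: number,
): void {
  ctx.save();                               // snapshot the current transform
  ctx.translate(x, y);                      // move the origin to the body's center
  ctx.rotate(angle);                        // spin about that center
  ctx.drawImage(img, -w / 2, -h / 2, w, h); // draw centered on the origin
  ctx.restore();                            // reset for the next body
}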
mining_test.go | Asset, 1, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("7"),
}, {
keys[3], 1e4, &asiutil.AsimovAsset, 4, false, 0, common.HexToHash("7"),
},
}, []*fakeOut{
{
keys[2].Address, 1e6, &asiutil.AsimovAsset,
}, {
keys[2].Address, 1e4 - 1, &asiutil.AsimovAsset,
}, {
keys[4].Address, 1e4, &asiutil.AsimovAsset,
}, {
keys[5].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[5].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[6].Address, 8e3 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 6},
}
//create tx depend last tx
fakeTxs = append(fakeTxs, &TxDesc{Tx: createFakeTx([]*fakeIn{
{
keys[5], 1e3, &asiutil.AsimovAsset, 4, false, 0x7FFFFFFF, *fakeTxs[len(fakeTxs)-1].Tx.Hash(),
},
}, []*fakeOut{
{
keys[0].Address, 1e3 - 2, &asiutil.AsimovAsset,
},
}, nil), GasPrice: 7})
invalidFakeTxs := TxDescList{
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1e4, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, math.MaxUint32, true, 0, common.HexToHash("0"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("8"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 + 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 3, false, 0, common.HexToHash("8"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1, invaildAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, invaildAsset, 4, false, 0, common.HexToHash("8"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1, invaildAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 5, false, 0, common.HexToHash("8"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1, &asiutil.AsimovAsset,
},
}, nil), GasPrice: 1},
}
getFees := func(amounts int64) map[protos.Asset]int64 {
res := make(map[protos.Asset]int64)
res[asiutil.AsimovAsset] = amounts
return res
}
privateKey := "0xd0f0461b7b4d26cf370e6c73b58ef7fa26e8e30853a8cee901ed42cf0879cb6e"
account, _ := crypto.NewAccount(privateKey)
tests := []struct {
validator *crypto.Account
gasFloor uint64
gasCeil uint64
round uint32
slot uint16
txs TxDescList
wantTx []*common.Hash
wantFees map[protos.Asset]int64
wantOpCosts []int64
wantWeight uint16
wantErr bool
}{
{
account, 160000000, 160000000, 1, 0, TxDescList{},
[]*common.Hash{},
make(map[protos.Asset]int64),
[]int64{1}, 720, false,
}, {
account, 160000000, 160000000, 1, 0, fakeTxs[0:1],
[]*common.Hash{fakeTxs[0].Tx.Hash()},
getFees(1e4),
[]int64{1, 1}, 720, false,
}, {
account, 160000000, 160000000, 1, 0, fakeTxs[1:7],
[]*common.Hash{fakeTxs[5].Tx.Hash(), fakeTxs[6].Tx.Hash(), fakeTxs[4].Tx.Hash(), fakeTxs[3].Tx.Hash(), fakeTxs[2].Tx.Hash(), fakeTxs[1].Tx.Hash()},
getFees(1 + 1 + 1e12 + 1e4 + 1 + 1e4 + 3),
[]int64{1, 6, 1, 5, 1, 1, 1}, 720, false,
}, {
account, 160000000, 160000000, 1, 0, invalidFakeTxs,
[]*common.Hash{},
make(map[protos.Asset]int64),
[]int64{1}, 720, false,
}, {
keys[0], 160000000, 160000000, 1, 0, TxDescList{},
[]*common.Hash{},
make(map[protos.Asset]int64),
[]int64{1}, 0, true,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
fakeTxSource.clear()
for _, v := range test.txs {
fakeTxSource.push(v)
}
template, err := g.ProduceNewBlock(test.validator, test.gasFloor, test.gasCeil,
time.Now().Unix(), test.round, test.slot, 5*100000)
if err != nil {
if test.wantErr != true {
t.Errorf("tests #%d error %v", i, err)
}
continue
}
block := template.Block
txs := block.MsgBlock().Transactions
if block.MsgBlock().Header.CoinBase != *test.validator.Address ||
block.MsgBlock().Header.Round != test.round ||
block.MsgBlock().Header.SlotIndex != test.slot ||
block.MsgBlock().Header.Weight != test.wantWeight {
t.Errorf("tests #%d Coinbase: %v ,Round: %v ,Slot: %v Weight: %v",
i, block.MsgBlock().Header.CoinBase, block.MsgBlock().Header.Round, block.MsgBlock().Header.SlotIndex, block.MsgBlock().Header.Weight)
}
outTxEqual := func(ltxs []*protos.MsgTx, rtxs []*common.Hash) bool {
if len(ltxs) != len(rtxs) | {
return false
} | conditional_block |
|
mining_test.go | (t *testing.T) {
// Create some fake priority items that exercise the expected sort
// edge conditions.
testItems := []*TxPrioItem{
{gasPrice: 5678,},
{gasPrice: 1234,},
{gasPrice: 10001,},
{gasPrice: 0,},
}
// Add random data in addition to the edge conditions already manually
// specified.
randSeed := rand.Int63()
defer func() {
if t.Failed() {
t.Logf("Random numbers using seed: %v", randSeed)
}
}()
prng := rand.New(rand.NewSource(randSeed))
for i := 0; i < 1000; i++ {
testItems = append(testItems, &TxPrioItem{
gasPrice: prng.Float64() * 10000,
})
}
// Test sorting by fee per KB then priority.
var highest *TxPrioItem
priorityQueue := NewTxPriorityQueue(len(testItems))
for i := 0; i < len(testItems); i++ {
prioItem := testItems[i]
if highest == nil {
highest = prioItem
}
if prioItem.gasPrice >= highest.gasPrice {
highest = prioItem
}
heap.Push(priorityQueue, prioItem)
}
for i := 0; i < len(testItems); i++ {
prioItem := heap.Pop(priorityQueue).(*TxPrioItem)
if prioItem.gasPrice > highest.gasPrice {
t.Fatalf("fee sort: item (fee per KB: %v) higher than than prev "+
"(fee per KB: %v)", prioItem.gasPrice, highest.gasPrice, )
}
highest = prioItem
}
}
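// The invariant exercised above, restated: successive heap.Pop calls must
// yield gasPrice values in non-increasing order, regardless of push order.
// Minimal form of the check, illustrative only:
//   prev := math.Inf(1)
//   for _, p := range popped {
//       if p > prev { t.Fatal("pop order violated") }
//       prev = p
//   }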
func TestCreateCoinbaseTx(t *testing.T) {
privKey, _ := crypto.NewPrivateKey(crypto.S256())
pkaddr, _ := address.NewAddressPubKey(privKey.PubKey().SerializeCompressed())
addr := pkaddr.AddressPubKeyHash()
tests := []struct {
validater common.IAddress
height int32
wantErr bool
}{
{
pkaddr,
1,
false,
}, {
addr,
1,
false,
}, {
&common.Address{},
1,
true,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
_, _, err := CreateCoinbaseTx(&chaincfg.MainNetParams, test.height, test.validater, nil)
if test.wantErr != (err != nil) {
t.Errorf("tests #%d error %v", i, err)
}
}
}
func TestNewBlockTemplate(t *testing.T) {
policy := Policy{
BlockProductedTimeOut: chaincfg.DefaultBlockProductedTimeOut,
TxConnectTimeOut: chaincfg.DefaultTxConnectTimeOut,
UtxoValidateTimeOut: chaincfg.DefaultUtxoValidateTimeOut,
}
chain, teardownFunc, err := newFakeChain(&chaincfg.MainNetParams)
if err != nil {
t.Error("newFakeChain error: ", err)
return
}
fakeTxSource := &fakeTxSource{make(map[common.Hash]*TxDesc)}
fakeSigSource := &fakeSigSource{make([]*asiutil.BlockSign, 0)}
g := NewBlkTmplGenerator(
&policy,
fakeTxSource,
fakeSigSource,
chain,
)
defer teardownFunc()
global_view := txo.NewUtxoViewpoint()
g.FetchUtxoView = func(tx *asiutil.Tx, dolock bool) (viewpoint *txo.UtxoViewpoint, e error) {
neededSet := make(map[protos.OutPoint]struct{})
prevOut := protos.OutPoint{Hash: *tx.Hash()}
for txOutIdx := range tx.MsgTx().TxOut {
prevOut.Index = uint32(txOutIdx)
neededSet[prevOut] = struct{}{}
}
if !blockchain.IsCoinBase(tx) {
for _, txIn := range tx.MsgTx().TxIn {
neededSet[txIn.PreviousOutPoint] = struct{}{}
}
}
// Request the utxos from the point of view of the end of the main
// chain.
view := txo.NewUtxoViewpoint()
for k, _ := range neededSet {
view.AddEntry(k,global_view.LookupEntry(k))
}
return view, nil
}
invaildAsset := protos.NewAsset(0, 0, 1)
keys := []*crypto.Account{}
for i := 0; i < 16; i++ {
privKey, _ := crypto.NewPrivateKey(crypto.S256())
pkaddr, _ := address.NewAddressPubKey(privKey.PubKey().SerializeCompressed())
addr := pkaddr.AddressPubKeyHash()
keys = append(keys, &crypto.Account {*privKey, *privKey.PubKey(),addr})
}
fakeTxs := TxDescList{
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1e4, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 2},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e18, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("2"),
}, {
keys[0], 1e4, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("3"),
},
}, []*fakeOut{
{
keys[0].Address, 1e18 - 1e12, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 3},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1234567890, &asiutil.AsimovAsset, 3, false, 0, common.HexToHash("4"),
}, {
keys[1], 1e6, &asiutil.AsimovAsset, 5, false, 0, common.HexToHash("4"),
}, {
keys[3], 1e4, &asiutil.AsimovAsset, 8, false, 0, common.HexToHash("5"),
},
}, []*fakeOut{
{
keys[2].Address, 1234567890 + 1e6, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 4},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e4, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("5"),
},
}, []*fakeOut{
{
keys[0].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[2].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[1].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[1].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[0].Address, 6e3 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 5},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("6"),
}, {
keys[1], 1e6, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("7"),
}, | TestTxPriceHeap | identifier_name |
|
mining_test.go | true,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
_, _, err := CreateCoinbaseTx(&chaincfg.MainNetParams, test.height, test.validater, nil)
if test.wantErr != (err != nil) {
t.Errorf("tests #%d error %v", i, err)
}
}
}
func TestNewBlockTemplate(t *testing.T) {
policy := Policy{
BlockProductedTimeOut: chaincfg.DefaultBlockProductedTimeOut,
TxConnectTimeOut: chaincfg.DefaultTxConnectTimeOut,
UtxoValidateTimeOut: chaincfg.DefaultUtxoValidateTimeOut,
}
chain, teardownFunc, err := newFakeChain(&chaincfg.MainNetParams)
if err != nil {
t.Error("newFakeChain error: ", err)
return
}
fakeTxSource := &fakeTxSource{make(map[common.Hash]*TxDesc)}
fakeSigSource := &fakeSigSource{make([]*asiutil.BlockSign, 0)}
g := NewBlkTmplGenerator(
&policy,
fakeTxSource,
fakeSigSource,
chain,
)
defer teardownFunc()
global_view := txo.NewUtxoViewpoint()
g.FetchUtxoView = func(tx *asiutil.Tx, dolock bool) (viewpoint *txo.UtxoViewpoint, e error) {
neededSet := make(map[protos.OutPoint]struct{})
prevOut := protos.OutPoint{Hash: *tx.Hash()}
for txOutIdx := range tx.MsgTx().TxOut {
prevOut.Index = uint32(txOutIdx)
neededSet[prevOut] = struct{}{}
}
if !blockchain.IsCoinBase(tx) {
for _, txIn := range tx.MsgTx().TxIn {
neededSet[txIn.PreviousOutPoint] = struct{}{}
}
}
// Request the utxos from the point of view of the end of the main
// chain.
view := txo.NewUtxoViewpoint()
for k, _ := range neededSet {
view.AddEntry(k,global_view.LookupEntry(k))
}
return view, nil
}
invaildAsset := protos.NewAsset(0, 0, 1)
keys := []*crypto.Account{}
for i := 0; i < 16; i++ {
privKey, _ := crypto.NewPrivateKey(crypto.S256())
pkaddr, _ := address.NewAddressPubKey(privKey.PubKey().SerializeCompressed())
addr := pkaddr.AddressPubKeyHash()
keys = append(keys, &crypto.Account{*privKey, *privKey.PubKey(), addr})
}
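// Fixture transactions with assorted gas prices and input/output shapes,
// presumably ordered by the template builder according to GasPrice.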
fakeTxs := TxDescList{
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1e4, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 2},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e18, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("2"),
}, {
keys[0], 1e4, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("3"),
},
}, []*fakeOut{
{
keys[0].Address, 1e18 - 1e12, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 3},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1234567890, &asiutil.AsimovAsset, 3, false, 0, common.HexToHash("4"),
}, {
keys[1], 1e6, &asiutil.AsimovAsset, 5, false, 0, common.HexToHash("4"),
}, {
keys[3], 1e4, &asiutil.AsimovAsset, 8, false, 0, common.HexToHash("5"),
},
}, []*fakeOut{
{
keys[2].Address, 1234567890 + 1e6, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 4},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e4, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("5"),
},
}, []*fakeOut{
{
keys[0].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[2].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[1].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[1].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[0].Address, 6e3 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 5},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("6"),
}, {
keys[1], 1e6, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("7"),
}, {
keys[3], 1e4, &asiutil.AsimovAsset, 4, false, 0, common.HexToHash("7"),
},
}, []*fakeOut{
{
keys[2].Address, 1e6, &asiutil.AsimovAsset,
}, {
keys[2].Address, 1e4 - 1, &asiutil.AsimovAsset,
}, {
keys[4].Address, 1e4, &asiutil.AsimovAsset,
}, {
keys[5].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[5].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[6].Address, 8e3 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 6},
}
// Create a tx that spends an output of the last tx appended above (a chained dependency)
fakeTxs = append(fakeTxs, &TxDesc{Tx: createFakeTx([]*fakeIn{
{
keys[5], 1e3, &asiutil.AsimovAsset, 4, false, 0x7FFFFFFF, *fakeTxs[len(fakeTxs)-1].Tx.Hash(),
},
}, []*fakeOut{
{
keys[0].Address, 1e3 - 2, &asiutil.AsimovAsset,
},
}, nil), GasPrice: 7})
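// Transactions that must be rejected: this first one re-spends the same
// outpoint (hash "1", index 0) already consumed by fakeTxs[0].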
invalidFakeTxs := TxDescList{
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1e4, &asi | {
privKey, _ := crypto.NewPrivateKey(crypto.S256())
pkaddr, _ := address.NewAddressPubKey(privKey.PubKey().SerializeCompressed())
addr := pkaddr.AddressPubKeyHash()
tests := []struct {
validater common.IAddress
height int32
wantErr bool
}{
{
pkaddr,
1,
false,
}, {
addr,
1,
false,
}, {
&common.Address{},
1, | identifier_body |
|
mining_test.go | addr,
1,
false,
}, {
&common.Address{},
1,
true,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
_, _, err := CreateCoinbaseTx(&chaincfg.MainNetParams, test.height, test.validater, nil)
if test.wantErr != (err != nil) {
t.Errorf("tests #%d error %v", i, err)
}
}
}
func TestNewBlockTemplate(t *testing.T) {
policy := Policy{
BlockProductedTimeOut: chaincfg.DefaultBlockProductedTimeOut,
TxConnectTimeOut: chaincfg.DefaultTxConnectTimeOut,
UtxoValidateTimeOut: chaincfg.DefaultUtxoValidateTimeOut,
}
chain, teardownFunc, err := newFakeChain(&chaincfg.MainNetParams)
if err != nil {
t.Error("newFakeChain error: ", err)
return
}
fakeTxSource := &fakeTxSource{make(map[common.Hash]*TxDesc)}
fakeSigSource := &fakeSigSource{make([]*asiutil.BlockSign, 0)}
g := NewBlkTmplGenerator(
&policy,
fakeTxSource,
fakeSigSource,
chain,
)
defer teardownFunc()
global_view := txo.NewUtxoViewpoint()
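// Stub the generator's UTXO fetcher so it serves both the tx's own outputs
// and its inputs straight from the shared in-memory test viewpoint.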
g.FetchUtxoView = func(tx *asiutil.Tx, dolock bool) (viewpoint *txo.UtxoViewpoint, e error) {
neededSet := make(map[protos.OutPoint]struct{})
prevOut := protos.OutPoint{Hash: *tx.Hash()}
for txOutIdx := range tx.MsgTx().TxOut {
prevOut.Index = uint32(txOutIdx)
neededSet[prevOut] = struct{}{}
}
if !blockchain.IsCoinBase(tx) {
for _, txIn := range tx.MsgTx().TxIn {
neededSet[txIn.PreviousOutPoint] = struct{}{}
}
}
// Request the utxos from the point of view of the end of the main
// chain.
view := txo.NewUtxoViewpoint()
for k := range neededSet {
view.AddEntry(k, global_view.LookupEntry(k))
}
return view, nil
}
invaildAsset := protos.NewAsset(0, 0, 1)
keys := []*crypto.Account{}
for i := 0; i < 16; i++ {
privKey, _ := crypto.NewPrivateKey(crypto.S256())
pkaddr, _ := address.NewAddressPubKey(privKey.PubKey().SerializeCompressed())
addr := pkaddr.AddressPubKeyHash()
keys = append(keys, &crypto.Account{*privKey, *privKey.PubKey(), addr})
}
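// Fixture transactions with assorted gas prices and input/output shapes,
// presumably ordered by the template builder according to GasPrice.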
fakeTxs := TxDescList{
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1e4, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 2},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e18, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("2"),
}, {
keys[0], 1e4, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("3"),
},
}, []*fakeOut{
{
keys[0].Address, 1e18 - 1e12, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 3},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1234567890, &asiutil.AsimovAsset, 3, false, 0, common.HexToHash("4"),
}, {
keys[1], 1e6, &asiutil.AsimovAsset, 5, false, 0, common.HexToHash("4"),
}, {
keys[3], 1e4, &asiutil.AsimovAsset, 8, false, 0, common.HexToHash("5"),
},
}, []*fakeOut{
{
keys[2].Address, 1234567890 + 1e6, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 4},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e4, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("5"),
},
}, []*fakeOut{
{
keys[0].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[2].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[1].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[1].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[0].Address, 6e3 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 5},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("6"),
}, {
keys[1], 1e6, &asiutil.AsimovAsset, 1, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("6"),
}, {
keys[2], 1e4, &asiutil.AsimovAsset, 2, false, 0, common.HexToHash("7"),
}, {
keys[3], 1e4, &asiutil.AsimovAsset, 4, false, 0, common.HexToHash("7"),
},
}, []*fakeOut{
{
keys[2].Address, 1e6, &asiutil.AsimovAsset,
}, {
keys[2].Address, 1e4 - 1, &asiutil.AsimovAsset,
}, {
keys[4].Address, 1e4, &asiutil.AsimovAsset,
}, {
keys[5].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[5].Address, 1e3, &asiutil.AsimovAsset,
}, {
keys[6].Address, 8e3 - 1, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 6},
}
// Create a tx that spends an output of the last tx appended above (a chained dependency)
fakeTxs = append(fakeTxs, &TxDesc{Tx: createFakeTx([]*fakeIn{
{
keys[5], 1e3, &asiutil.AsimovAsset, 4, false, 0x7FFFFFFF, *fakeTxs[len(fakeTxs)-1].Tx.Hash(),
},
}, []*fakeOut{
{
keys[0].Address, 1e3 - 2, &asiutil.AsimovAsset,
},
}, nil), GasPrice: 7})
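// Transactions that must be rejected: a re-spend of the outpoint already
// consumed by fakeTxs[0], then a coinbase-style input (index MaxUint32).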
invalidFakeTxs := TxDescList{
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, 0, false, 0, common.HexToHash("1"),
},
}, []*fakeOut{
{
keys[1].Address, 1e8 - 1e4, &asiutil.AsimovAsset,
},
}, global_view), GasPrice: 1},
{Tx: createFakeTx([]*fakeIn{
{
keys[0], 1e8, &asiutil.AsimovAsset, math.MaxUint32, true, 0, common.HexToHash("0"),
},
| pkaddr,
1,
false,
}, { | random_line_split |
|
__init__.py | Backend %s is not supported" % new_default_backend
)
__default_backend = new_default_backend
def get_default_backend():
"""Returns the default cryptensor backend (mpc, he)"""
return __default_backend
def cryptensor(*args, backend=None, **kwargs):
"""
Factory function to return encrypted tensor of given backend.
"""
if backend is None:
backend = get_default_backend()
if backend == crypten.mpc:
return backend.MPCTensor(*args, **kwargs)
else:
raise TypeError("Backend %s is not supported" % backend)
def is_encrypted_tensor(obj):
"""
Returns True if obj is an encrypted tensor.
"""
return isinstance(obj, CrypTensor)
def _setup_przs():
"""
Generate shared random seeds to generate pseudo-random sharings of
zero. The random seeds are shared such that each process shares
one seed with the previous rank process and one with the next rank.
This allows for the generation of `n` random values, each known to
exactly two of the `n` parties.
For arithmetic sharing, one of these parties will add the number
while the other subtracts it, allowing for the generation of a
pseudo-random sharing of zero. (This can be done for binary
sharing using bitwise-xor rather than addition / subtraction)
"""
# Initialize RNG Generators
comm.get().g0 = torch.Generator()
comm.get().g1 = torch.Generator()
# Generate random seeds for Generators
# NOTE: Chosen seed can be any number, but we choose as a random 64-bit
# integer here so other parties cannot guess its value.
# We sometimes get here from a forked process, which causes all parties
# to have the same RNG state. Reset the seed to make sure RNG streams
# are different in all the parties. We use numpy's random here since
# setting its seed to None will produce different seeds even from
# forked processes.
import numpy
numpy.random.seed(seed=None)
next_seed = torch.tensor(numpy.random.randint(-2 ** 63, 2 ** 63 - 1, (1,)))
prev_seed = torch.LongTensor([0]) # placeholder
# Send random seed to next party, receive random seed from prev party
world_size = comm.get().get_world_size()
rank = comm.get().get_rank()
if world_size >= 2: # Otherwise sending seeds will segfault.
next_rank = (rank + 1) % world_size
prev_rank = (next_rank - 2) % world_size
req0 = comm.get().isend(tensor=next_seed, dst=next_rank)
req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)
req0.wait()
req1.wait()
else:
prev_seed = next_seed
# Seed Generators
comm.get().g0.manual_seed(next_seed.item())
comm.get().g1.manual_seed(prev_seed.item())
def __validate_model(loaded_model, dummy_model):
"""Validates that two models have the same architecture"""
loaded_modules = [loaded_model]
dummy_modules = [dummy_model]
valid = torch.tensor(1, dtype=torch.long)
try:
while len(loaded_modules) > 0:
loaded_module = loaded_modules.pop(0)
dummy_module = dummy_modules.pop(0)
# Assert modules have the same number of parameters
loaded_params = [param for param in loaded_module.parameters()]
dummy_params = [param for param in dummy_module.parameters()]
assert len(loaded_params) == len(dummy_params)
for i, param in enumerate(loaded_params):
assert param.size() == dummy_params[i].size()
# Assert that modules have the same number of sub-modules
loaded_module_modules = [mod for mod in loaded_module.modules()][1:]
dummy_module_modules = [mod for mod in dummy_module.modules()][1:]
loaded_modules.extend(loaded_module_modules)
dummy_modules.extend(dummy_module_modules)
assert len(loaded_modules) == len(dummy_modules)
except AssertionError:
valid = torch.tensor(0, dtype=torch.long)
return valid
def load(f, encrypted=False, dummy_model=None, src=0, **kwargs):
"""
Loads an object saved with `torch.save()` or `crypten.save()`.
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
encrypted: Determines whether crypten should load an encrypted tensor
or a plaintext torch tensor.
dummy_model: Takes a model architecture to fill with the loaded model
(on the `src` party only). Non-source parties will return the
`dummy_model` input (with data unchanged). Loading a model will
assert the correctness of the model architecture provided against
the model loaded. This argument is ignored if the file loaded is
a tensor.
src: Determines the source of the tensor. If `src` is None, each
party will attempt to read in the specified file. If `src` is
specified, the source party will read the tensor from the file and
broadcast it to the other parties.
"""
if encrypted:
raise NotImplementedError("Loading encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Load failed: src argument must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Load failed: src must be in [0, world_size)"
# TODO: Use send_obj and recv_obj to send modules without requiring a
# dummy_model
# source party
if comm.get().get_rank() == src:
result = torch.load(f, **kwargs)
# file contains torch.tensor
if torch.is_tensor(result):
# Broadcast load type
load_type = torch.tensor(0, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Broadcast size to other parties.
dim = torch.tensor(result.dim(), dtype=torch.long)
size = torch.tensor(result.size(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
comm.get().broadcast(size, src=src)
result = cryptensor(result, src=src)
# file contains torch module
elif isinstance(result, torch.nn.Module):
# Broadcast load type
load_type = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Assert that dummy_model is provided
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
# Assert that model architectures are the same
valid = __validate_model(result, dummy_model)
comm.get().broadcast(valid, src=src) # Broadcast validation
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
# file contains unrecognized type
else:
# Broadcast load type
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# raise error
raise TypeError("Unrecognized load type %s" % type(result))
# Non-source party
else:
# Receive load type from source party
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Load in tensor
if load_type.item() == 0:
# Receive size from source party
dim = torch.empty(size=(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
size = torch.empty(size=(dim.item(),), dtype=torch.long)
comm.get().broadcast(size, src=src)
result = cryptensor(torch.empty(size=tuple(size.tolist())), src=src)
# Load module using dummy_model
elif load_type.item() == 1:
# Assert dummy_model is given
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
result = dummy_model
# Receive model architecture validation
valid = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(valid, src=src)
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
else:
raise TypeError("Unrecognized load type on src")
# TODO: Encrypt modules before returning them
return result
def save(obj, f, src=0, **kwargs):
| """
Saves a CrypTensor or PyTorch tensor to a file.
Args:
obj: The CrypTensor or PyTorch tensor to be saved
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
src: The source party that writes data to the specified file.
"""
if is_encrypted_tensor(obj):
raise NotImplementedError("Saving encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Save failed: src must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Save failed: src must be an integer in [0, world_size)"
if comm.get().get_rank() == src:
torch.save(obj, f, **kwargs)
| identifier_body |
|
__init__.py | _stats()
def reset_communication_stats():
comm.get().reset_communication_stats()
# Set backend
__SUPPORTED_BACKENDS = [crypten.mpc]
__default_backend = __SUPPORTED_BACKENDS[0]
def set_default_backend(new_default_backend):
"""Sets the default cryptensor backend (mpc, he)"""
global __default_backend
assert new_default_backend in __SUPPORTED_BACKENDS, (
"Backend %s is not supported" % new_default_backend
)
__default_backend = new_default_backend
def get_default_backend():
"""Returns the default cryptensor backend (mpc, he)"""
return __default_backend
def cryptensor(*args, backend=None, **kwargs):
"""
Factory function to return encrypted tensor of given backend.
"""
if backend is None:
backend = get_default_backend()
if backend == crypten.mpc:
return backend.MPCTensor(*args, **kwargs)
else:
raise TypeError("Backend %s is not supported" % backend)
def is_encrypted_tensor(obj):
"""
Returns True if obj is an encrypted tensor.
"""
return isinstance(obj, CrypTensor)
def _setup_przs():
"""
Generate shared random seeds to generate pseudo-random sharings of
zero. The random seeds are shared such that each process shares
one seed with the previous rank process and one with the next rank.
This allows for the generation of `n` random values, each known to
exactly two of the `n` parties.
For arithmetic sharing, one of these parties will add the number
while the other subtracts it, allowing for the generation of a
pseudo-random sharing of zero. (This can be done for binary
sharing using bitwise-xor rather than addition / subtraction)
"""
# Initialize RNG Generators
comm.get().g0 = torch.Generator()
comm.get().g1 = torch.Generator()
# Generate random seeds for Generators
# NOTE: Chosen seed can be any number, but we choose as a random 64-bit
# integer here so other parties cannot guess its value.
# We sometimes get here from a forked process, which causes all parties
# to have the same RNG state. Reset the seed to make sure RNG streams
# are different in all the parties. We use numpy's random here since
# setting its seed to None will produce different seeds even from
# forked processes.
import numpy
numpy.random.seed(seed=None)
next_seed = torch.tensor(numpy.random.randint(-2 ** 63, 2 ** 63 - 1, (1,)))
prev_seed = torch.LongTensor([0]) # placeholder
# Send random seed to next party, receive random seed from prev party
world_size = comm.get().get_world_size()
rank = comm.get().get_rank()
if world_size >= 2: # Otherwise sending seeds will segfault.
next_rank = (rank + 1) % world_size
prev_rank = (next_rank - 2) % world_size
req0 = comm.get().isend(tensor=next_seed, dst=next_rank)
req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)
req0.wait()
req1.wait()
else:
prev_seed = next_seed
# Seed Generators
comm.get().g0.manual_seed(next_seed.item())
comm.get().g1.manual_seed(prev_seed.item())
def __validate_model(loaded_model, dummy_model):
"""Validates that two models have the same architecture"""
loaded_modules = [loaded_model]
dummy_modules = [dummy_model]
valid = torch.tensor(1, dtype=torch.long)
try:
while len(loaded_modules) > 0:
|
except AssertionError:
valid = torch.tensor(0, dtype=torch.long)
return valid
def load(f, encrypted=False, dummy_model=None, src=0, **kwargs):
"""
Loads an object saved with `torch.save()` or `crypten.save()`.
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
encrypted: Determines whether crypten should load an encrypted tensor
or a plaintext torch tensor.
dummy_model: Takes a model architecture to fill with the loaded model
(on the `src` party only). Non-source parties will return the
`dummy_model` input (with data unchanged). Loading a model will
assert the correctness of the model architecture provided against
the model loaded. This argument is ignored if the file loaded is
a tensor.
src: Determines the source of the tensor. If `src` is None, each
party will attempt to read in the specified file. If `src` is
specified, the source party will read the tensor from the file and
broadcast it to the other parties.
"""
if encrypted:
raise NotImplementedError("Loading encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Load failed: src argument must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Load failed: src must be in [0, world_size)"
# TODO: Use send_obj and recv_obj to send modules without requiring a
# dummy_model
# source party
if comm.get().get_rank() == src:
result = torch.load(f, **kwargs)
# file contains torch.tensor
if torch.is_tensor(result):
# Broadcast load type
load_type = torch.tensor(0, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Broadcast size to other parties.
dim = torch.tensor(result.dim(), dtype=torch.long)
size = torch.tensor(result.size(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
comm.get().broadcast(size, src=src)
result = cryptensor(result, src=src)
# file contains torch module
elif isinstance(result, torch.nn.Module):
# Broadcast load type
load_type = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Assert that dummy_model is provided
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
# Assert that model architectures are the same
valid = __validate_model(result, dummy_model)
comm.get().broadcast(valid, src=src) # Broadcast validation
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
# file contains unrecognized type
else:
# Broadcast load type
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# raise error
raise TypeError("Unrecognized load type %s" % type(result))
# Non-source party
else:
# Receive load type from source party
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Load in tensor
if load_type.item() == 0:
# Receive size from source party
dim = torch.empty(size=(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
size = torch.empty(size=(dim.item(),), dtype=torch.long)
comm.get().broadcast(size, src=src)
result = cryptensor(torch.empty(size=tuple(size.tolist())), src=src)
# Load module using dummy_model
elif load_type.item() == 1:
# Assert dummy_model is given
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
result = dummy_model
# Receive model architecture validation
valid = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(valid, src=src)
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
else:
raise TypeError("Unrecognized load type on src")
# TODO: Encrypt modules before returning them
return result
def save(obj, f, src=0, **kwargs):
"""
Saves a CrypTensor or PyTorch tensor to a file.
Args:
obj: The CrypTensor or PyTorch tensor to be saved
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
src: The source party that writes data to the specified file.
"""
if is_encrypted_tensor(obj | loaded_module = loaded_modules.pop(0)
dummy_module = dummy_modules.pop(0)
# Assert modules have the same number of parameters
loaded_params = [param for param in loaded_module.parameters()]
dummy_params = [param for param in dummy_module.parameters()]
assert len(loaded_params) == len(dummy_params)
for i, param in enumerate(loaded_params):
assert param.size() == dummy_params[i].size()
# Assert that modules have the same number of sub-modules
loaded_module_modules = [mod for mod in loaded_module.modules()][1:]
dummy_module_modules = [mod for mod in dummy_module.modules()][1:]
loaded_modules.extend(loaded_module_modules)
dummy_modules.extend(dummy_module_modules)
assert len(loaded_modules) == len(dummy_modules) | conditional_block |
__init__.py | _stats()
def reset_communication_stats():
comm.get().reset_communication_stats()
# Set backend
__SUPPORTED_BACKENDS = [crypten.mpc]
__default_backend = __SUPPORTED_BACKENDS[0]
def set_default_backend(new_default_backend):
"""Sets the default cryptensor backend (mpc, he)"""
global __default_backend
assert new_default_backend in __SUPPORTED_BACKENDS, (
"Backend %s is not supported" % new_default_backend
)
__default_backend = new_default_backend
def get_default_backend():
"""Returns the default cryptensor backend (mpc, he)"""
return __default_backend
def | (*args, backend=None, **kwargs):
"""
Factory function to return encrypted tensor of given backend.
"""
if backend is None:
backend = get_default_backend()
if backend == crypten.mpc:
return backend.MPCTensor(*args, **kwargs)
else:
raise TypeError("Backend %s is not supported" % backend)
def is_encrypted_tensor(obj):
"""
Returns True if obj is an encrypted tensor.
"""
return isinstance(obj, CrypTensor)
def _setup_przs():
"""
Generate shared random seeds to generate pseudo-random sharings of
zero. The random seeds are shared such that each process shares
one seed with the previous rank process and one with the next rank.
This allows for the generation of `n` random values, each known to
exactly two of the `n` parties.
For arithmetic sharing, one of these parties will add the number
while the other subtracts it, allowing for the generation of a
pseudo-random sharing of zero. (This can be done for binary
sharing using bitwise-xor rather than addition / subtraction)
"""
# Initialize RNG Generators
comm.get().g0 = torch.Generator()
comm.get().g1 = torch.Generator()
# Generate random seeds for Generators
# NOTE: Chosen seed can be any number, but we choose as a random 64-bit
# integer here so other parties cannot guess its value.
# We sometimes get here from a forked process, which causes all parties
# to have the same RNG state. Reset the seed to make sure RNG streams
# are different in all the parties. We use numpy's random here since
# setting its seed to None will produce different seeds even from
# forked processes.
import numpy
numpy.random.seed(seed=None)
next_seed = torch.tensor(numpy.random.randint(-2 ** 63, 2 ** 63 - 1, (1,)))
prev_seed = torch.LongTensor([0]) # placeholder
# Send random seed to next party, receive random seed from prev party
world_size = comm.get().get_world_size()
rank = comm.get().get_rank()
if world_size >= 2: # Otherwise sending seeds will segfault.
next_rank = (rank + 1) % world_size
prev_rank = (next_rank - 2) % world_size
req0 = comm.get().isend(tensor=next_seed, dst=next_rank)
req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)
req0.wait()
req1.wait()
else:
prev_seed = next_seed
# Seed Generators
comm.get().g0.manual_seed(next_seed.item())
comm.get().g1.manual_seed(prev_seed.item())
def __validate_model(loaded_model, dummy_model):
"""Validates that two models have the same architecture"""
loaded_modules = [loaded_model]
dummy_modules = [dummy_model]
valid = torch.tensor(1, dtype=torch.long)
try:
while len(loaded_modules) > 0:
loaded_module = loaded_modules.pop(0)
dummy_module = dummy_modules.pop(0)
# Assert modules have the same number of parameters
loaded_params = [param for param in loaded_module.parameters()]
dummy_params = [param for param in dummy_module.parameters()]
assert len(loaded_params) == len(dummy_params)
for i, param in enumerate(loaded_params):
assert param.size() == dummy_params[i].size()
# Assert that modules have the same number of sub-modules
loaded_module_modules = [mod for mod in loaded_module.modules()][1:]
dummy_module_modules = [mod for mod in dummy_module.modules()][1:]
loaded_modules.extend(loaded_module_modules)
dummy_modules.extend(dummy_module_modules)
assert len(loaded_modules) == len(dummy_modules)
except AssertionError:
valid = torch.tensor(0, dtype=torch.long)
return valid
def load(f, encrypted=False, dummy_model=None, src=0, **kwargs):
"""
Loads an object saved with `torch.save()` or `crypten.save()`.
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
encrypted: Determines whether crypten should load an encrypted tensor
or a plaintext torch tensor.
dummy_model: Takes a model architecture to fill with the loaded model
(on the `src` party only). Non-source parties will return the
`dummy_model` input (with data unchanged). Loading a model will
assert the correctness of the model architecture provided against
the model loaded. This argument is ignored if the file loaded is
a tensor.
src: Determines the source of the tensor. If `src` is None, each
party will attempt to read in the specified file. If `src` is
specified, the source party will read the tensor from the file and
broadcast it to the other parties.
"""
if encrypted:
raise NotImplementedError("Loading encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Load failed: src argument must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Load failed: src must be in [0, world_size)"
# TODO: Use send_obj and recv_obj to send modules without requiring a
# dummy_model
# source party
if comm.get().get_rank() == src:
result = torch.load(f, **kwargs)
# file contains torch.tensor
if torch.is_tensor(result):
# Broadcast load type
load_type = torch.tensor(0, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Broadcast size to other parties.
dim = torch.tensor(result.dim(), dtype=torch.long)
size = torch.tensor(result.size(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
comm.get().broadcast(size, src=src)
result = cryptensor(result, src=src)
# file contains torch module
elif isinstance(result, torch.nn.Module):
# Broadcast load type
load_type = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Assert that dummy_model is provided
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
# Assert that model architectures are the same
valid = __validate_model(result, dummy_model)
comm.get().broadcast(valid, src=src) # Broadcast validation
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
# file contains unrecognized type
else:
# Broadcast load type
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# raise error
raise TypeError("Unrecognized load type %s" % type(result))
# Non-source party
else:
# Receive load type from source party
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Load in tensor
if load_type.item() == 0:
# Receive size from source party
dim = torch.empty(size=(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
size = torch.empty(size=(dim.item(),), dtype=torch.long)
comm.get().broadcast(size, src=src)
result = cryptensor(torch.empty(size=tuple(size.tolist())), src=src)
# Load module using dummy_model
elif load_type.item() == 1:
# Assert dummy_model is given
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
result = dummy_model
# Receive model architecture validation
valid = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(valid, src=src)
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
else:
raise TypeError("Unrecognized load type on src")
# TODO: Encrypt modules before returning them
return result
def save(obj, f, src=0, **kwargs):
"""
Saves a CrypTensor or PyTorch tensor to a file.
Args:
obj: The CrypTensor or PyTorch tensor to be saved
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
src: The source party that writes data to the specified file.
"""
if is_encrypted_tensor(obj | cryptensor | identifier_name |
__init__.py | import crypten.nn # noqa: F401
import torch
# other imports:
from . import debug
from .cryptensor import CrypTensor
from .mpc import ptype
def init():
comm._init(use_threads=False, init_ttp=crypten.mpc.ttp_required())
if comm.get().get_rank() < comm.get().get_world_size():
_setup_przs()
if crypten.mpc.ttp_required():
crypten.mpc.provider.ttp_provider.TTPClient._init()
def init_thread(rank, world_size):
comm._init(use_threads=True, rank=rank, world_size=world_size)
_setup_przs()
def uninit():
return comm.uninit()
def is_initialized():
return comm.is_initialized()
# the different private type attributes of an mpc encrypted tensor
arithmetic = ptype.arithmetic
binary = ptype.binary
def print_communication_stats():
comm.get().print_communication_stats()
def reset_communication_stats():
comm.get().reset_communication_stats()
# Set backend
__SUPPORTED_BACKENDS = [crypten.mpc]
__default_backend = __SUPPORTED_BACKENDS[0]
def set_default_backend(new_default_backend):
"""Sets the default cryptensor backend (mpc, he)"""
global __default_backend
assert new_default_backend in __SUPPORTED_BACKENDS, (
"Backend %s is not supported" % new_default_backend
)
__default_backend = new_default_backend
def get_default_backend():
"""Returns the default cryptensor backend (mpc, he)"""
return __default_backend
def cryptensor(*args, backend=None, **kwargs):
"""
Factory function to return encrypted tensor of given backend.
"""
if backend is None:
backend = get_default_backend()
if backend == crypten.mpc:
return backend.MPCTensor(*args, **kwargs)
else:
raise TypeError("Backend %s is not supported" % backend)
def is_encrypted_tensor(obj):
"""
Returns True if obj is an encrypted tensor.
"""
return isinstance(obj, CrypTensor)
def _setup_przs():
"""
Generate shared random seeds to generate pseudo-random sharings of
zero. The random seeds are shared such that each process shares
one seed with the previous rank process and one with the next rank.
This allows for the generation of `n` random values, each known to
exactly two of the `n` parties.
For arithmetic sharing, one of these parties will add the number
while the other subtracts it, allowing for the generation of a
pseudo-random sharing of zero. (This can be done for binary
sharing using bitwise-xor rather than addition / subtraction)
"""
# Initialize RNG Generators
comm.get().g0 = torch.Generator()
comm.get().g1 = torch.Generator()
# Generate random seeds for Generators
# NOTE: Chosen seed can be any number, but we choose as a random 64-bit
# integer here so other parties cannot guess its value.
# We sometimes get here from a forked process, which causes all parties
# to have the same RNG state. Reset the seed to make sure RNG streams
# are different in all the parties. We use numpy's random here since
# setting its seed to None will produce different seeds even from
# forked processes.
import numpy
numpy.random.seed(seed=None)
next_seed = torch.tensor(numpy.random.randint(-2 ** 63, 2 ** 63 - 1, (1,)))
prev_seed = torch.LongTensor([0]) # placeholder
# Send random seed to next party, receive random seed from prev party
world_size = comm.get().get_world_size()
rank = comm.get().get_rank()
if world_size >= 2: # Otherwise sending seeds will segfault.
next_rank = (rank + 1) % world_size
prev_rank = (next_rank - 2) % world_size
req0 = comm.get().isend(tensor=next_seed, dst=next_rank)
req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)
req0.wait()
req1.wait()
else:
prev_seed = next_seed
# Seed Generators
comm.get().g0.manual_seed(next_seed.item())
comm.get().g1.manual_seed(prev_seed.item())
def __validate_model(loaded_model, dummy_model):
"""Validates that two models have the same architecture"""
loaded_modules = [loaded_model]
dummy_modules = [dummy_model]
valid = torch.tensor(1, dtype=torch.long)
try:
while len(loaded_modules) > 0:
loaded_module = loaded_modules.pop(0)
dummy_module = dummy_modules.pop(0)
# Assert modules have the same number of parameters
loaded_params = [param for param in loaded_module.parameters()]
dummy_params = [param for param in dummy_module.parameters()]
assert len(loaded_params) == len(dummy_params)
for i, param in enumerate(loaded_params):
assert param.size() == dummy_params[i].size()
# Assert that modules have the same number of sub-modules
loaded_module_modules = [mod for mod in loaded_module.modules()][1:]
dummy_module_modules = [mod for mod in dummy_module.modules()][1:]
loaded_modules.extend(loaded_module_modules)
dummy_modules.extend(dummy_module_modules)
assert len(loaded_modules) == len(dummy_modules)
except AssertionError:
valid = torch.tensor(0, dtype=torch.long)
return valid
def load(f, encrypted=False, dummy_model=None, src=0, **kwargs):
"""
Loads an object saved with `torch.save()` or `crypten.save()`.
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
encrypted: Determines whether crypten should load an encrypted tensor
or a plaintext torch tensor.
dummy_model: Takes a model architecture to fill with the loaded model
(on the `src` party only). Non-source parties will return the
`dummy_model` input (with data unchanged). Loading a model will
assert the correctness of the model architecture provided against
the model loaded. This argument is ignored if the file loaded is
a tensor.
src: Determines the source of the tensor. If `src` is None, each
party will attempt to read in the specified file. If `src` is
specified, the source party will read the tensor from the file and
broadcast it to the other parties.
"""
if encrypted:
raise NotImplementedError("Loading encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Load failed: src argument must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Load failed: src must be in [0, world_size)"
# TODO: Use send_obj and recv_obj to send modules without requiring a
# dummy_model
# source party
if comm.get().get_rank() == src:
result = torch.load(f, **kwargs)
# file contains torch.tensor
if torch.is_tensor(result):
# Broadcast load type
load_type = torch.tensor(0, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Broadcast size to other parties.
dim = torch.tensor(result.dim(), dtype=torch.long)
size = torch.tensor(result.size(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
comm.get().broadcast(size, src=src)
result = cryptensor(result, src=src)
# file contains torch module
elif isinstance(result, torch.nn.Module):
# Broadcast load type
load_type = torch.tensor(1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Assert that dummy_model is provided
assert dummy_model is not None and isinstance(
dummy_model, torch.nn.Module
), "dummy model must be provided when loading a model"
# Assert that model architectures are the same
valid = __validate_model(result, dummy_model)
comm.get().broadcast(valid, src=src) # Broadcast validation
assert valid.item(), "Model architecture does not match loaded module"
result.src = src
# file contains unrecognized type
else:
# Broadcast load type
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# raise error
raise TypeError("Unrecognized load type %s" % type(result))
# Non-source party
else:
# Receive load type from source party
load_type = torch.tensor(-1, dtype=torch.long)
comm.get().broadcast(load_type, src=src)
# Load in tensor
if load_type.item() == 0:
# Receive size from source party
dim = torch.empty(size=(), dtype=torch.long)
comm.get().broadcast(dim, src=src)
size = torch.empty(size=(dim.item(),), dtype=torch.long)
comm.get().broadcast(size, src=src)
result = cryptensor(torch.empty(size=tuple(size.tolist())), src=src)
# Load module using dummy_model
elif load_type.item() == 1:
# Assert dummy_model is given
assert dummy_model is not None and isinstance | import crypten.mpc # noqa: F401 | random_line_split |
|
debugvm.py | ")
update_cpu_views()
def cb_idle(sender, data):
VMCPU.reset()
VMCPU.moveToWord("Idle")
update_cpu_views()
def cb_halt(sender, data):
VMCPU.reset()
VMCPU.moveToWord("Halt")
update_cpu_views()
def add_controls():
if does_item_exist("Controls"):
delete_item("Controls")
with window("Controls", autosize=True, x_pos=0, y_pos=0):
with group("Buttons1", horizontal=True):
w = charW(6)
add_button("STEP", width=w, callback=cb_step, tip="Run one instruction")
add_button("STEPL", width=w, callback=cb_lstep, tip="Run one source line of code")
add_button("NEXTL", width=w, callback=cb_nextl, tip="Run until next source line of code")
with group("Buttons2", horizontal=True):
add_button("OVER", width=w, callback=cb_over, tip="Run one line of code, don't show subroutines")
add_button("OUT", width=w, callback=cb_out, tip="Run until ';' is executed")
add_button("RUN", width=w, callback=cb_run, tip="Run until completion, or a breakpoint")
with group("Buttons3", horizontal=True):
add_button("SHOT", width=w, callback=cb_shot, tip="Move to 'RunShot'")
add_button("IDLE", width=w, callback=cb_idle, tip="Move to 'Idle'")
add_button("HALT", width=w, callback=cb_halt, tip="Move to 'Halt'")
for item in get_item_children("Controls"):
set_item_style_var(item, mvGuiStyleVar_FrameRounding, [charH(1)*0.3])
set_item_style_var(item, mvGuiStyleVar_FramePadding, [charW(1)*0.3, 1])
def add_editor():
if does_item_exist("Program"):
del Windows["Program"]
delete_item("Program")
Windows["Program"] = Editor(FILETOLOAD)
def cb_add_controls(sender, data):
add_controls()
def cb_add_editor(sender, data):
add_editor()
def cb_nop(sender, data):
pass
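# Sector-based HSV -> RGB conversion; channels are returned on a 0-255 scale
# (plus alpha) to match DearPyGui's color arguments.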
def hsv_to_rgb(h: float, s: float, v: float, a: float) -> (float, float, float, float):
if s == 0.0: return (v, v, v, 255*a)
i = int(h*6.)
f = (h*6.)-i; p,q,t = v*(1.-s), v*(1.-s*f), v*(1.-s*(1.-f)); i%=6
if i == 0: return (255*v, 255*t, 255*p, 255*a)
if i == 1: return (255*q, 255*v, 255*p, 255*a)
if i == 2: return (255*p, 255*v, 255*t, 255*a)
if i == 3: return (255*p, 255*q, 255*v, 255*a)
if i == 4: return (255*t, 255*p, 255*v, 255*a)
if i == 5: return (255*v, 255*p, 255*q, 255*a)
class Editor:
def __init__(self, filename):
self.D = DebugDis(filename)
self.TextLines = self.D.SourceLines
self.addLines()
self.Selected = None
def selectMemAddr(self, addr):
"""
Highlight the line of code associated with the CPU program counter
"""
oldaddr = self.Selected
if oldaddr != None:
sl = self.D.getSourceLineForAddr(oldaddr)
item = f"SourceL{sl}"
#for item in get_item_children(f"SourceG{sl}"):
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
self.Selected = addr
if self.Selected != None:
sl = self.D.getSourceLineForAddr(addr)
#for item in get_item_children(f"SourceG{sl}"):
item = f"SourceL{sl}"
set_item_color(item, mvGuiCol_Button, hsv_to_rgb(4/7.0, 0.8, 0.8, 1.0))
#set_item_color(f"SourceLNG{sl}", mvGuiCol_Text, [155,0,75,175])
#configure_item(f"SourceL{sl}", enabled=True)
#print(get_item_configuration(f"SourceL{sl}"))
def updateDisplay(self):
self.selectMemAddr(VMCPU.PC)
def cb_addr_click(self, sender, data):
#print(sender, data)
VMCPU.toggleBP(data)
item = f"SourceLN{self.D.getSourceLineForAddr(data)}"
i = 4
hovercol = hsv_to_rgb(i/7.0, 0.7, 0.7, 0.3)
if VMCPU.isBP(data):
set_item_color(item, mvGuiCol_Button, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
set_item_color(item, mvGuiCol_ButtonHovered, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
configure_item(item, tip="Breakpoint at Addr %d" % data)
else:
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
set_item_color(item, mvGuiCol_ButtonHovered, hovercol)
configure_item(item, tip="")
def addLine(self, name, count, field1, field2, padto, cb, cb_data):
field2 = field2 + (' '*(padto-len(field2))) + ' '
with group(f"{name}G{count}", horizontal=True):
add_button(f"{name}LN{count}", label = field1, callback=cb, callback_data=cb_data)
if field2 == '':
add_button(f"{name}L{count}", label = ' ')
else:
add_button(f"{name}L{count}", label = field2)
i = 4
hovercol = hsv_to_rgb(i/7.0, 0.7, 0.7, 0.3)
for item in get_item_children(f"{name}G{count}"):
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
set_item_color(item, mvGuiCol_ButtonHovered, hovercol)
set_item_color(item, mvGuiCol_ButtonActive, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
set_item_style_var(item, mvGuiStyleVar_FrameRounding, [2])
set_item_style_var(item, mvGuiStyleVar_FramePadding, [1, 1])
def addLines(self):
longestline = max(len(x.rstrip()) for x in self.TextLines)
with window("Program", x_pos=400, y_pos=200, width=charW(longestline+12), height=charH(40), no_scrollbar=True):
with tab_bar("ProgramTab"):
with tab("Source"):
with child("SourceChild", autosize_x=True, autosize_y=True):
for i, line in enumerate(self.TextLines, start=1):
addr = self.D.getAddrForSourceLine(i)
self.addLine("Source", i, "%5d" % i, line, longestline, self.cb_addr_click, addr)
with tab("Opcodes"):
memdump = self.D.dumpOpcodes()
for i, op in enumerate(memdump):
addr = op[0]
with group(f"opcodesLG{addr}", horizontal=True):
add_text(f"opcodeAddr{addr}", default_value= "%5d" % op[0])
add_text(f"opcodeBytes{addr}", default_value= " ".join([ "%02X" % x for x in op[1]]))
if op[2]:
add_text(f"opcodeval{i}", default_value= ("%d" % op[2]))
add_text(f"opcodesym{i}", default_value='('+op[3]+')')
else:
add_text(f"opcodesym{i}", default_value=op[3])
class MemoryDisplay:
def __init__(self, cpu, name):
self.Name = name
self.CPU = cpu
self.createDisplay()
def getFloatAddrInfo(self, a):
"""
Given an address, return the value at that address, and its symbol
"""
return self.CPU.memFetch(SEntry(a, None))
def getByteAddrInfo(self, a):
"""
Given an address, return the value at that address, and its symbol
"""
return self.CPU.memFetchB(SEntry(a, None))
def | updateDisplay | identifier_name |
|
debugvm.py | point:
update_cpu_views()
def cb_out(sender, data):
if "Program" not in Windows:
return
try:
while VMCPU.getCurrentOpcodeName() != ";":
VMCPU.step(ignorebp=VMCPU.PC)
update_cpu_views()
VMCPU.step(ignorebp=VMCPU.PC)
update_cpu_views()
except VMCPUStopped:
update_cpu_views()
except VMCPUBreakpoint:
update_cpu_views()
def cb_lstep(sender, data):
# Step opcode execution until associated source line changes
if "Program" not in Windows:
return
try:
editor = Windows["Program"]
currentpc = VMCPU.PC
currentline = editor.D.getSourceLineForAddr(VMCPU.PC)
line = currentline
while (line == currentline):
VMCPU.step(ignorebp=currentpc)
update_cpu_views()
line = editor.D.getSourceLineForAddr(VMCPU.PC)
except VMCPUStopped:
update_cpu_views()
except VMCPUBreakpoint:
update_cpu_views()
# def step_opcode():
# addr = VMCPU.PC
# nextaddr = VMCPU.nextOpcodeAddr(addr)
# while VMCPU.PC != nextaddr:
# VMCPU.step()
def cb_nextl(sender, data):
# Step opcode execution until source line increments, ignoring subroutines
if "Program" not in Windows:
return
try:
editor = Windows["Program"]
currentop = VMCPU.getOpcodeName(VMCPU.ROM[VMCPU.PC])
if currentop == ';':
cb_step(sender, data)
return
# TODO: FOR is going to create some interesting corner cases. Will deal with it later.
currentpc = VMCPU.PC
currentline = editor.D.getSourceLineForAddr(VMCPU.PC)
line = currentline
nextlineaddr = editor.D.getAddrForSourceLine(currentline+1)
while line == currentline:
while VMCPU.PC != nextlineaddr:
VMCPU.step(ignorebp=currentpc)
if random.random() < 0.05:
update_cpu_views() # Update UI 5% of the time
update_cpu_views()
line = editor.D.getSourceLineForAddr(VMCPU.PC)
except VMCPUStopped:
update_cpu_views()
except VMCPUBreakpoint:
update_cpu_views()
def cb_over(sender, data):
# Step opcode execution until source line changes, without viewing subroutines
if "Program" not in Windows:
return
try:
editor = Windows["Program"]
currentop = VMCPU.getOpcodeName(VMCPU.ROM[VMCPU.PC])
if currentop == ';':
cb_step(sender, data)
return
currentpc = VMCPU.PC
currentline = editor.D.getSourceLineForAddr(VMCPU.PC)
line = currentline
while line == currentline:
if VMCPU.isCurrentOpCall():
# It's a call to a subroutine. Run until we get out.
nextlineaddr = editor.D.getAddrForSourceLine(line+1)
while VMCPU.PC != nextlineaddr:
VMCPU.step(ignorebp=currentpc)
else:
# Not a call. Just execute an opcode
VMCPU.step(ignorebp=currentpc)
update_cpu_views()
line = editor.D.getSourceLineForAddr(VMCPU.PC)
except VMCPUStopped:
update_cpu_views()
except VMCPUBreakpoint:
update_cpu_views()
def cb_shot(sender, data):
SYSTIME.reset()
VMCPU.reset()
VMCPU.moveToWord("RunShot")
update_cpu_views()
def cb_idle(sender, data):
VMCPU.reset()
VMCPU.moveToWord("Idle")
update_cpu_views()
def cb_halt(sender, data):
VMCPU.reset()
VMCPU.moveToWord("Halt")
update_cpu_views()
def add_controls():
if does_item_exist("Controls"):
|
with window("Controls", autosize=True, x_pos=0, y_pos=0):
with group("Buttons1", horizontal=True):
w = charW(6)
add_button("STEP", width=w, callback=cb_step, tip="Run one instruction")
add_button("STEPL", width=w, callback=cb_lstep, tip="Run one source line of code")
add_button("NEXTL", width=w, callback=cb_nextl, tip="Run until next source line of code")
with group("Buttons2", horizontal=True):
add_button("OVER", width=w, callback=cb_over, tip="Run one line of code, don't show subroutines")
add_button("OUT", width=w, callback=cb_out, tip="Run until ';' is executed")
add_button("RUN", width=w, callback=cb_run, tip="Run until completion, or a breakpoint")
with group("Buttons3", horizontal=True):
add_button("SHOT", width=w, callback=cb_shot, tip="Move to 'RunShot'")
add_button("IDLE", width=w, callback=cb_idle, tip="Move to 'Idle'")
add_button("HALT", width=w, callback=cb_halt, tip="Move to 'Halt'")
for item in get_item_children("Controls"):
set_item_style_var(item, mvGuiStyleVar_FrameRounding, [charH(1)*0.3])
set_item_style_var(item, mvGuiStyleVar_FramePadding, [charW(1)*0.3, 1])
def add_editor():
if does_item_exist("Program"):
del Windows["Program"]
delete_item("Program")
Windows["Program"] = Editor(FILETOLOAD)
def cb_add_controls(sender, data):
add_controls()
def cb_add_editor(sender, data):
add_editor()
def cb_nop(sender, data):
pass
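# Sector-based HSV -> RGB conversion; channels are returned on a 0-255 scale
# (plus alpha) to match DearPyGui's color arguments.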
def hsv_to_rgb(h: float, s: float, v: float, a: float) -> (float, float, float, float):
if s == 0.0: return (v, v, v, 255*a)
i = int(h*6.)
f = (h*6.)-i; p,q,t = v*(1.-s), v*(1.-s*f), v*(1.-s*(1.-f)); i%=6
if i == 0: return (255*v, 255*t, 255*p, 255*a)
if i == 1: return (255*q, 255*v, 255*p, 255*a)
if i == 2: return (255*p, 255*v, 255*t, 255*a)
if i == 3: return (255*p, 255*q, 255*v, 255*a)
if i == 4: return (255*t, 255*p, 255*v, 255*a)
if i == 5: return (255*v, 255*p, 255*q, 255*a)
class Editor:
def __init__(self, filename):
self.D = DebugDis(filename)
self.TextLines = self.D.SourceLines
self.addLines()
self.Selected = None
def selectMemAddr(self, addr):
"""
Highlight the line of code associated with the CPU program counter
"""
oldaddr = self.Selected
if oldaddr != None:
sl = self.D.getSourceLineForAddr(oldaddr)
item = f"SourceL{sl}"
#for item in get_item_children(f"SourceG{sl}"):
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
self.Selected = addr
if self.Selected != None:
sl = self.D.getSourceLineForAddr(addr)
#for item in get_item_children(f"SourceG{sl}"):
item = f"SourceL{sl}"
set_item_color(item, mvGuiCol_Button, hsv_to_rgb(4/7.0, 0.8, 0.8, 1.0))
#set_item_color(f"SourceLNG{sl}", mvGuiCol_Text, [155,0,75,175])
#configure_item(f"SourceL{sl}", enabled=True)
#print(get_item_configuration(f"SourceL{sl}"))
def updateDisplay(self):
self.selectMemAddr(VMCPU.PC)
def cb_addr_click(self, sender, data):
#print(sender, data)
VMCPU.toggleBP(data)
item = f"SourceLN{self.D.getSourceLineForAddr(data)}"
i = 4
hovercol = hsv_to_rgb(i/7.0, 0.7, 0.7, 0.3)
if VMCPU.isBP(data):
set_item_color(item, mvGuiCol_Button, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
set_item_color(item, mvGuiCol | delete_item("Controls") | conditional_block |
debugvm.py | print(get_item_configuration(f"SourceL{sl}"))
def updateDisplay(self):
self.selectMemAddr(VMCPU.PC)
def cb_addr_click(self, sender, data):
#print(sender, data)
VMCPU.toggleBP(data)
item = f"SourceLN{self.D.getSourceLineForAddr(data)}"
i = 4
hovercol = hsv_to_rgb(i/7.0, 0.7, 0.7, 0.3)
if VMCPU.isBP(data):
set_item_color(item, mvGuiCol_Button, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
set_item_color(item, mvGuiCol_ButtonHovered, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
configure_item(item, tip="Breakpoint at Addr %d" % data)
else:
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
set_item_color(item, mvGuiCol_ButtonHovered, hovercol)
configure_item(item, tip="")
def addLine(self, name, count, field1, field2, padto, cb, cb_data):
field2 = field2 + (' '*(padto-len(field2))) + ' '
with group(f"{name}G{count}", horizontal=True):
add_button(f"{name}LN{count}", label = field1, callback=cb, callback_data=cb_data)
if field2 == '':
add_button(f"{name}L{count}", label = ' ')
else:
add_button(f"{name}L{count}", label = field2)
i = 4
hovercol = hsv_to_rgb(i/7.0, 0.7, 0.7, 0.3)
for item in get_item_children(f"{name}G{count}"):
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
set_item_color(item, mvGuiCol_ButtonHovered, hovercol)
set_item_color(item, mvGuiCol_ButtonActive, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
set_item_style_var(item, mvGuiStyleVar_FrameRounding, [2])
set_item_style_var(item, mvGuiStyleVar_FramePadding, [1, 1])
def addLines(self):
longestline = max(len(x.rstrip()) for x in self.TextLines)
with window("Program", x_pos=400, y_pos=200, width=charW(longestline+12), height=charH(40), no_scrollbar=True):
with tab_bar("ProgramTab"):
with tab("Source"):
with child("SourceChild", autosize_x=True, autosize_y=True):
for i, line in enumerate(self.TextLines, start=1):
addr = self.D.getAddrForSourceLine(i)
self.addLine("Source", i, "%5d" % i, line, longestline, self.cb_addr_click, addr)
with tab("Opcodes"):
memdump = self.D.dumpOpcodes()
for i, op in enumerate(memdump):
addr = op[0]
with group(f"opcodesLG{addr}", horizontal=True):
add_text(f"opcodeAddr{addr}", default_value= "%5d" % op[0])
add_text(f"opcodeBytes{addr}", default_value= " ".join([ "%02X" % x for x in op[1]]))
if op[2]:
add_text(f"opcodeval{i}", default_value= ("%d" % op[2]))
add_text(f"opcodesym{i}", default_value='('+op[3]+')')
else:
add_text(f"opcodesym{i}", default_value=op[3])
class MemoryDisplay:
def __init__(self, cpu, name):
self.Name = name
self.CPU = cpu
self.createDisplay()
def getFloatAddrInfo(self, a):
"""
Given an address, return the value at that address, and its symbol
"""
return self.CPU.memFetch(SEntry(a, None))
def getByteAddrInfo(self, a):
"""
Given an address, return the value at that address, and its symbol
"""
return self.CPU.memFetchB(SEntry(a, None))
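# Refresh only the cells named in the CPU's dirty write list; the aligned
# float column is updated whenever a dirty byte lands on a 4-byte boundary.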
def updateDisplay(self):
wl = self.CPU.getMemWriteList()
if len(wl):
for addr, writelen in wl:
for byteaddr in range(addr, addr+writelen):
val, sym = self.getByteAddrInfo(byteaddr)
val = int(round(val))
set_value(f"{self.Name}bval_{byteaddr}", "%02X %4d" % (val, val))
set_value(f"{self.Name}bsym_{byteaddr}", "%s" % sym)
if (byteaddr % 4) == 0:
val, sym = self.getFloatAddrInfo(byteaddr)
set_value(f"{self.Name}fval_{byteaddr}", "%12.6f" % val)
set_value(f"{self.Name}fsym_{byteaddr}", "%s" % sym)
self.CPU.clearMemWriteList()
def createDisplay(self):
with window(self.Name):
with child(f"{self.Name}child", width=charW(100), height=charH(16), border=False):
with managed_columns(f"{self.Name}mc", 2):
with group(f"{self.Name}left"):
for byteaddr in range(256):
# 4 Bytes and a Float
# Horizontal double column, 4 bytes on the left, float on the right
with group(f"{self.Name}bline_{byteaddr}", horizontal=True):
val, sym = self.getByteAddrInfo(byteaddr)
val = int(round(val))
add_text(f"{self.Name}byte_{byteaddr}", default_value="%05d %04x" % (byteaddr, byteaddr))
add_text(f"{self.Name}bval_{byteaddr}", default_value="%02X %4d" % (val, val))
add_text(f"{self.Name}bsym_{byteaddr}", default_value="%s" % (sym,))
with group(f"{self.Name}right"):
for addr in range(0, 256):
if (addr % 4) == 0:
with group(f"{self.Name}fline_{addr}", horizontal=True):
val, sym = self.getFloatAddrInfo(addr)
#add_text(f"{self.Name}float_{addr}", default_value="%05d %04x" % (addr, addr))
add_text(f"{self.Name}fval_{addr}", default_value="%12.6f" % (val,))
add_text(f"{self.Name}fsym_{addr}", default_value="%s" % (sym,))
else:
with group(f"{self.Name}spacerg_{addr}", horizontal=True):
add_text(f"{self.Name}spacerl_{addr}", default_value=' ')
class StackDisplay:
def __init__(self, name, stack):
self.Stack = stack
self.Name = name
self.createDisplay()
def getStackVal(self, pos):
if len(self.Stack) > pos:
return self.Stack.read(pos)
else:
return None
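# Rebuild the 64 visible stack rows only when the stack reports a change;
# empty slots render as dashes with their symbol text cleared.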
def updateDisplay(self):
if self.Stack.Changed:
for i in range(64):
sv = self.getStackVal(i)
if sv != None:
#print(get_item_configuration(f"{self.Name}val_{i}"))
configure_item(f"{self.Name}val_{i}", label=("%12.6f" % self.getStackVal(i).float))
set_value(f"{self.Name}sym_{i}", self.getStackVal(i).symbol)
configure_item(f"{self.Name}sym_{i}", tip=self.getStackVal(i).symbol)
else:
configure_item(f"{self.Name}val_{i}", label="------------")
set_value(f"{self.Name}sym_{i}", '')
configure_item(f"{self.Name}sym_{i}", tip='')
def createDisplay(self):
with window(self.Name, autosize=True):
with child(f"{self.Name}child", width=charW(40), height=charH(16), border=False):
for i in range(64):
with group(f"{self.Name}group_{i}", horizontal=True):
add_text(f"{self.Name}pos_{i}", default_value="%02d" % i)
sv = self.getStackVal(i)
if sv != None:
with tree_node(f"{self.Name}val_{i}", label="%12.6f" % self.getStackVal(i).float, default_open=True):
add_text(f"{self.Name}sym_{i}", default_value=self.getStackVal(i).symbol)
else:
with tree_node(f"{self.Name}val_{i}", label="------------", default_open=True):
add_text(f"{self.Name}sym_{i}", default_value='')
def add_stack(stack, name):
| if does_item_exist(name):
del Windows[name]
delete_item(name)
Windows[name] = StackDisplay(name, stack) | identifier_body |
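add_stack above (like add_mem and add_cpu_info further down) follows a recreate-on-add pattern: drop the Python wrapper from the Windows registry, delete the underlying GUI item, then rebuild both. A minimal sketch of that pattern, assuming the dearpygui 0.6-style does_item_exist/delete_item calls and the module-level Windows dict used throughout this file:

def replace_window(name, factory, *args):
    # tear down any stale wrapper and GUI item before rebuilding
    if does_item_exist(name):
        Windows.pop(name, None)  # pop() avoids a KeyError if only the GUI item exists
        delete_item(name)
    Windows[name] = factory(name, *args)

# usage, mirroring add_stack:
# replace_window("Stack", StackDisplay, cpu.Stack)

Using pop() instead of del also hardens the pattern against the case where the GUI item exists but was never registered in Windows.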
|
debugvm.py | 4
hovercol = hsv_to_rgb(i/7.0, 0.7, 0.7, 0.3)
for item in get_item_children(f"{name}G{count}"):
set_item_color(item, mvGuiCol_Button, [0,0,0,0])
set_item_color(item, mvGuiCol_ButtonHovered, hovercol)
set_item_color(item, mvGuiCol_ButtonActive, hsv_to_rgb(i/7.0, 0.8, 0.8, 1.0))
set_item_style_var(item, mvGuiStyleVar_FrameRounding, [2])
set_item_style_var(item, mvGuiStyleVar_FramePadding, [1, 1])
def addLines(self):
longestline = max(len(x.rstrip()) for x in self.TextLines)
with window("Program", x_pos=400, y_pos=200, width=charW(longestline+12), height=charH(40), no_scrollbar=True):
with tab_bar("ProgramTab"):
with tab("Source"):
with child("SourceChild", autosize_x=True, autosize_y=True):
for i, line in enumerate(self.TextLines, start=1):
addr = self.D.getAddrForSourceLine(i)
self.addLine("Source", i, "%5d" % i, line, longestline, self.cb_addr_click, addr)
with tab("Opcodes"):
memdump = self.D.dumpOpcodes()
for i, op in enumerate(memdump):
addr = op[0]
with group(f"opcodesLG{addr}", horizontal=True):
add_text(f"opcodeAddr{addr}", default_value= "%5d" % op[0])
add_text(f"opcodeBytes{addr}", default_value= " ".join([ "%02X" % x for x in op[1]]))
if op[2]:
add_text(f"opcodeval{i}", default_value= ("%d" % op[2]))
add_text(f"opcodesym{i}", default_value='('+op[3]+')')
else:
add_text(f"opcodesym{i}", default_value=op[3])
class MemoryDisplay:
def __init__(self, cpu, name):
self.Name = name
self.CPU = cpu
self.createDisplay()
def getFloatAddrInfo(self, a):
"""
Given an address, return the value at that address, and its symbol
"""
return self.CPU.memFetch(SEntry(a, None))
def getByteAddrInfo(self, a):
"""
Given an address, return the value at that address, and its symbol
"""
return self.CPU.memFetchB(SEntry(a, None))
def updateDisplay(self):
wl = self.CPU.getMemWriteList()
if len(wl):
for addr, writelen in wl:
for byteaddr in range(addr, addr+writelen):
val, sym = self.getByteAddrInfo(byteaddr)
val = int(round(val))
set_value(f"{self.Name}bval_{byteaddr}", "%02X %4d" % (val, val))
set_value(f"{self.Name}bsym_{byteaddr}", "%s" % sym)
if (byteaddr % 4) == 0:
val, sym = self.getFloatAddrInfo(byteaddr)
set_value(f"{self.Name}fval_{byteaddr}", "%12.6f" % val)
set_value(f"{self.Name}fsym_{byteaddr}", "%s" % sym)
self.CPU.clearMemWriteList()
def createDisplay(self):
with window(self.Name):
with child(f"{self.Name}child", width=charW(100), height=charH(16), border=False):
with managed_columns(f"{self.Name}mc", 2):
with group(f"{self.Name}left"):
for byteaddr in range(256):
# 4 Bytes and a Float
# Horizontal double column, 4 bytes on the left, float on the right
with group(f"{self.Name}bline_{byteaddr}", horizontal=True):
val, sym = self.getByteAddrInfo(byteaddr)
val = int(round(val))
add_text(f"{self.Name}byte_{byteaddr}", default_value="%05d %04x" % (byteaddr, byteaddr))
add_text(f"{self.Name}bval_{byteaddr}", default_value="%02X %4d" % (val, val))
add_text(f"{self.Name}bsym_{byteaddr}", default_value="%s" % (sym,))
with group(f"{self.Name}right"):
for addr in range(0, 256):
if (addr % 4) == 0:
with group(f"{self.Name}fline_{addr}", horizontal=True):
val, sym = self.getFloatAddrInfo(addr)
#add_text(f"{self.Name}float_{addr}", default_value="%05d %04x" % (addr, addr))
add_text(f"{self.Name}fval_{addr}", default_value="%12.6f" % (val,))
add_text(f"{self.Name}fsym_{addr}", default_value="%s" % (sym,))
else:
with group(f"{self.Name}spacerg_{addr}", horizontal=True):
add_text(f"{self.Name}spacerl_{addr}", default_value=' ')
class StackDisplay:
def __init__(self, name, stack):
self.Stack = stack
self.Name = name
self.createDisplay()
def getStackVal(self, pos):
if len(self.Stack) > pos:
return self.Stack.read(pos)
else:
return None
def updateDisplay(self):
if self.Stack.Changed:
for i in range(64):
sv = self.getStackVal(i)
if sv is not None:
#print(get_item_configuration(f"{self.Name}val_{i}"))
configure_item(f"{self.Name}val_{i}", label=("%12.6f" % self.getStackVal(i).float))
set_value(f"{self.Name}sym_{i}", self.getStackVal(i).symbol)
configure_item(f"{self.Name}sym_{i}", tip=self.getStackVal(i).symbol)
else:
configure_item(f"{self.Name}val_{i}", label="------------")
set_value(f"{self.Name}sym_{i}", '')
configure_item(f"{self.Name}sym_{i}", tip='')
def createDisplay(self):
with window(self.Name, autosize=True):
with child(f"{self.Name}child", width=charW(40), height=charH(16), border=False):
for i in range(64):
with group(f"{self.Name}group_{i}", horizontal=True):
add_text(f"{self.Name}pos_{i}", default_value="%02d" % i)
sv = self.getStackVal(i)
if sv is not None:
with tree_node(f"{self.Name}val_{i}", label="%12.6f" % self.getStackVal(i).float, default_open=True):
add_text(f"{self.Name}sym_{i}", default_value=self.getStackVal(i).symbol)
else:
with tree_node(f"{self.Name}val_{i}", label="------------", default_open=True):
add_text(f"{self.Name}sym_{i}", default_value='')
def add_stack(stack, name):
if does_item_exist(name):
del Windows[name]
delete_item(name)
Windows[name] = StackDisplay(name, stack)
def add_mem(cpu, name):
if does_item_exist(name):
del Windows[name]
delete_item(name)
Windows[name] = MemoryDisplay(cpu, name)
class CPUInfo:
def __init__(self, cpu, name):
self.Name = name
self.CPU = cpu
self.createDisplay()
def updateDisplay(self):
set_value(f"{self.Name}PC", "PC: %05d" % self.CPU.PC)
set_value(f"{self.Name}Cycles", "Cycles: %06d" % self.CPU.Cycles)
def createDisplay(self):
with window(self.Name, autosize=True):
with child(f"{self.Name}child", width=charW(16), height=charH(3)):
with group(f"{self.Name}group"):
add_text(f"{self.Name}PC", default_value="PC: %05d" % self.CPU.PC)
add_text(f"{self.Name}Cycles", default_value="Cycles: %06d" % self.CPU.Cycles)
def add_cpu_info(cpu, name):
if does_item_exist(name):
del Windows[name]
delete_item(name)
Windows[name] = CPUInfo(cpu, name)
def fix_window_positions():
wp = get_style_frame_padding()
mbw, mbh = [int(x) for x in get_item_rect_size("MenuBar")]
windows = get_windows()
windows = [x for x in windows if x != "Main Window"]
for i in windows:
x, y = [int(x) for x in get_window_pos(i)]
fix = False
if x < 0:
x = 0
fix = True |
if y < mbh: | random_line_split |
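fix_window_positions clamps stray windows back into view: x is clamped to 0, and the truncated branch presumably snaps y to the menu-bar height so windows cannot slide under the menu bar. The rule in isolation, as a pure function (menubar_h stands in for mbh; the y branch is an assumption based on the visible x < 0 branch):

def clamp_window_pos(x, y, menubar_h):
    fixed = False
    if x < 0:
        x, fixed = 0, True
    if y < menubar_h:
        y, fixed = menubar_h, True  # assumed: mirrors the x < 0 branch
    return x, y, fixed

assert clamp_window_pos(-5, 3, 20) == (0, 20, True)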
|
aug_utility.py |
return boxes
def find_margined_bounding_boxes(fimage, lables, margins):
# initialize boxes array
boxes = []
for lable in lables:
# iterate over all labels
# filter out image pixels with the current label
labled = (fimage == lable) + 0
# find indexes
box = find_bounding_box(labled, margins)
# append the found bounding box
boxes.append(box)
return boxes
def find_bounding_box(binary_matrix, margins=(0, 0)):
# extract indexes of foreground pixels
indicies = np.array(np.nonzero(binary_matrix + 0))
# compute the min/max extents
ys = margins[1] + np.amin(indicies[0])
ye = margins[1] + np.amax(indicies[0])
xs = margins[0] + np.amin(indicies[1])
xe = margins[0] + np.amax(indicies[1])
# return the two corner points
return [(xs, ys), (xe, ye)]
def weightFilter(image, lables, weight):
max = 0
weights = np.zeros((lables))
fimage = np.zeros_like(image)
retained_lables = []
for i in range(lables):
weights[i] = np.sum(np.sum(image == i))
if weights[i] > weights[max]:
max = i
if weights[i] > weight:
fimage += np.uint8((image == i) + 0)
retained_lables.append(i)
fimage -= np.uint8((image == max) + 0)
fimage = np.uint8(fimage * 255)
boxes = []
if (len(retained_lables) > 0):
retained_lables.remove(max)
boxes = find_bounding_boxes(image.copy(), retained_lables)
return fimage, boxes
def weightFilterMini(image, weight):
image = np.uint8(image)
# extract contours
image, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
final_contours = []
for cnt in contours:
if cv2.contourArea(cnt) >= weight:
# add it to final_contours
final_contours.append(cnt)
fimage = np.zeros((image.shape[:2]), np.uint8)
cv2.drawContours(fimage, final_contours, -1, 255, -1)
boxes = RBox.toPointBoundingBoxes(RBox.fromClassicalBoundingBoxes([cv2.boundingRect(cnt) for cnt in final_contours]))
return fimage, boxes
def weightFilterMargined(image, lables, weight, margins):
max = 0
weights = np.zeros((lables))
fimage = np.zeros_like(image)
retained_lables = []
for i in range(lables):
weights[i] = np.sum(np.sum(image == i))
if weights[i] > weights[max]:
max = i
if weights[i] > weight:
fimage += np.uint8((image == i) + 0)
retained_lables.append(i)
fimage -= np.uint8(image == max)
fimage = np.uint8(fimage * 255)
boxes = []
if (len(retained_lables) > 0):
retained_lables.remove(max)
boxes = find_margined_bounding_boxes(image.copy(), retained_lables, margins)
return fimage, boxes
def calculatePossiblePadding(box, shape, default = 20):
w_pad = default
h_pad = default
# dynamic padding
if default == 0:
rbox = RBox.fromPointBoundingBox(box)
w_pad = round(0.205 * rbox.w)
h_pad = round(0.205 * rbox.h)
# extract width and height from shape
height, width = shape[0:2]
# extract starting, ending x and y from box
((x_start, y_start), (x_end, y_end)) = box
# check if it is possible to add the requested padding
# if not, add as much padding as possible on all 4 sides
pad_x_start = h_pad
if y_start - pad_x_start < 0:
pad_x_start = y_start
pad_y_start = w_pad
if x_start - pad_y_start < 0:
pad_y_start = x_start
pad_x_end = h_pad
if y_end + pad_x_end >= height:
pad_x_end = height - y_end - 1
pad_y_end = w_pad
if x_end + pad_y_end >= width:
pad_y_end = width - x_end - 1
# return resultant padding
return pad_x_start, pad_x_end, pad_y_start, pad_y_end
def findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):
img = frame.copy() # gray-scale image
# smooth the image (to remove small objects)
imgf = ndimage.gaussian_filter(img, blur_radius)
# find connected components
labeled, nr_objects = ndimage.label(imgf > threshold)
return labeled, nr_objects
def drawBoundingBox(im, start, end, color):
cv2.rectangle(im, start, end, color, 1)
def pwpBasedTracking(image, frame_models, threshold):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
predicted = np.zeros((image.shape[0:2]), np.uint8)
# FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH
for fm in frame_models:
patch = extractPatch(image, fm[1])
#patch = cv2.medianBlur(patch, 5)
mask = np.zeros(patch.shape[0:2], np.uint8)
res = applyModel(patch, mask, fm[0])
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
if len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10):
predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res
return predicted
def extractPatch(im, box):
# extract coordinates
x1, x2, y1, y2 = box
# extract and return patch
return im[x1: x2, y1: y2, :]
def randomColor():
return np.random.randint(0, 255, (1, 3))[0].tolist()
def performColorProcessing(image, mask, iterations = 1):
# initialize kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
for i in range(iterations):
model = computePosteriors(image, np.uint8(mask > 0) + 0)
mask = applyModel(image, mask, model)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)
return mask
def killDyingLables(frame, mask, threshold = 0.5):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)]) + 0.00001
# get the final labeled frame
labled_frame = frame * mask
# get final weights
final_weights = np.array([np.sum(labled_frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = (final_weights/initial_weights) < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
labled_frame -= np.uint8((labled_frame == lable) * lable)
# return the final labeled frame
return labled_frame
def killSmallLables(frame, threshold = 150):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = initial_weights < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
frame -= np.uint8(np.uint8(frame == lable) * lable)
# return the final labeled frame
return frame
class RBox:
def __init__(self):
# initialize attributes
self.x = 0
self.y = 0
self.w | labled = (fimage == lable) + 0
# find indexes
box = find_bounding_box(labled)
# append the found bounding box
boxes.append(box) | conditional_block |
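find_bounding_box is the core primitive of this module: np.nonzero lists the (row, col) indices of the foreground pixels, and the box is simply their min/max extent plus optional margins. A worked example with a 2x4 blob:

import numpy as np

binary = np.zeros((6, 6), np.uint8)
binary[2:4, 1:5] = 1  # rows 2-3, cols 1-4

idx = np.array(np.nonzero(binary))
ys, ye = idx[0].min(), idx[0].max()  # 2, 3
xs, xe = idx[1].min(), idx[1].max()  # 1, 4
print(((xs, ys), (xe, ye)))          # ((1, 2), (4, 3))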
|
aug_utility.py | (box, shape, default = 20):
w_pad = default
h_pad = default
# dynamic padding
if default == 0:
rbox = RBox.fromPointBoundingBox(box)
w_pad = round(0.205 * rbox.w)
h_pad = round(0.205 * rbox.h)
# extract width and height from shape
height, width = shape[0:2]
# extract starting, ending x and y from box
((x_start, y_start), (x_end, y_end)) = box
# check if it is possible to add the requested padding
# if not, add as much padding as possible on all 4 sides
pad_x_start = h_pad
if y_start - pad_x_start < 0:
pad_x_start = y_start
pad_y_start = w_pad
if x_start - pad_y_start < 0:
pad_y_start = x_start
pad_x_end = h_pad
if y_end + pad_x_end >= height:
pad_x_end = height - y_end - 1
pad_y_end = w_pad
if x_end + pad_y_end >= width:
pad_y_end = width - x_end - 1
# return resultant padding
return pad_x_start, pad_x_end, pad_y_start, pad_y_end
def findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):
img = frame.copy() # gray-scale image
# smooth the image (to remove small objects)
imgf = ndimage.gaussian_filter(img, blur_radius)
# find connected components
labeled, nr_objects = ndimage.label(imgf > threshold)
return labeled, nr_objects
def drawBoundingBox(im, start, end, color):
cv2.rectangle(im, start, end, color, 1)
def pwpBasedTracking(image, frame_models, threshold):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
predicted = np.zeros((image.shape[0:2]), np.uint8)
# FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH
for fm in frame_models:
patch = extractPatch(image, fm[1])
#patch = cv2.medianBlur(patch, 5)
mask = np.zeros(patch.shape[0:2], np.uint8)
res = applyModel(patch, mask, fm[0])
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
if len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10):
predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res
return predicted
def extractPatch(im, box):
# extract coordinates
x1, x2, y1, y2 = box
# extract and return patch
return im[x1: x2, y1: y2, :]
def randomColor():
return np.random.randint(0, 255, (1, 3))[0].tolist()
def performColorProcessing(image, mask, iterations = 1):
# initialize kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
for i in range(iterations):
model = computePosteriors(image, np.uint8(mask > 0) + 0)
mask = applyModel(image, mask, model)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)
return mask
def killDyingLables(frame, mask, threshold = 0.5):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)]) + 0.00001
# get the final labeled frame
labled_frame = frame * mask
# get final weights
final_weights = np.array([np.sum(labled_frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = (final_weights/initial_weights) < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
labled_frame -= np.uint8((labled_frame == lable) * lable)
# return the final labeled frame
return labled_frame
def killSmallLables(frame, threshold = 150):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = initial_weights < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
frame -= np.uint8(np.uint8(frame == lable) * lable)
# return the final labeled frame
return frame
class RBox:
def __init__(self):
# initialize attributes
self.x = 0
self.y = 0
self.w = 0
self.h = 0
@staticmethod
def fromClassicalBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[1]
rbox.w = box[2]
rbox.h = box[3]
# return rbox
return rbox
@staticmethod
def fromClassicalBoundingBoxes(boxes):
return [RBox.fromClassicalBoundingBox(box) for box in boxes]
@staticmethod
def fromRoughBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[2]
rbox.h = box[1] - box[0]
rbox.w = box[3] - box[2]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0][0]
rbox.y = box[0][1]
rbox.w = box[1][0] - box[0][0]
rbox.h = box[1][1] - box[0][1]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBoxes(boxes):
return [RBox.fromPointBoundingBox(box) for box in boxes]
def classicalBoundingBox(self):
# return array like bounding box
return [self.x, self.y, self.w, self.h]
def pointBoundingBox(self):
# return tuple of end points
return ((self.x, self.y), (self.x + self.w, self.y + self.h))
def area(self):
return self.h * self.w
def __or__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = min(self.x, other_box.x)
rbox.y = min(self.y, other_box.y)
rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y
return rbox
def __and__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = max(self.x, other_box.x)
rbox.y = max(self.y, other_box.y)
rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y
if rbox.w < 0 or rbox.h < 0:
# reinitialize or make it zero
rbox = RBox()
return rbox
def similarity(self, other_box):
# (A & B)/(A | B) = (A & B).area/(A.area + B.area - (A & B).area)
#return (self & other_box).area()/(self.area() + other_box.area() - (self & other_box).area())
min_area = min(self.area(), other_box.area())
return (self & other_box).area()/min_area
def __str__(self):
return "{} {} {} {}".format(self.x, self.y, self.w, self.h)
def __mul__(self, other_box):
# calculate similarity and return
return self.similarity(other_box)
def | __eq__ | identifier_name |
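calculatePossiblePadding requests a fixed (or box-proportional) amount of padding on each side but never steps outside the image. The same clamping can be phrased more compactly with min(); this sketch collapses the four if-blocks (it returns top/bottom/left/right rather than the original's pad_x/pad_y naming):

def clamped_padding(box, shape, pad=20):
    (x_start, y_start), (x_end, y_end) = box
    height, width = shape[:2]
    top = min(pad, y_start)
    bottom = min(pad, height - y_end - 1)
    left = min(pad, x_start)
    right = min(pad, width - x_end - 1)
    return top, bottom, left, right

# a box near the top-left of a 100x100 image only gets what fits:
print(clamped_padding(((5, 3), (40, 30)), (100, 100)))  # (3, 20, 5, 20)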
|
aug_utility.py | np.uint8(image)
# extract contours
image, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
final_contours = []
for cnt in contours:
if cv2.contourArea(cnt) >= weight:
# add it to final_contours
final_contours.append(cnt)
fimage = np.zeros((image.shape[:2]), np.uint8)
cv2.drawContours(fimage, final_contours, -1, 255, -1)
boxes = RBox.toPointBoundingBoxes(RBox.fromClassicalBoundingBoxes([cv2.boundingRect(cnt) for cnt in final_contours]))
return fimage, boxes
def weightFilterMargined(image, lables, weight, margins):
max = 0
weights = np.zeros((lables))
fimage = np.zeros_like(image)
retained_lables = []
for i in range(lables):
weights[i] = np.sum(np.sum(image == i))
if weights[i] > weights[max]:
max = i
if weights[i] > weight:
fimage += np.uint8((image == i) + 0)
retained_lables.append(i)
fimage -= np.uint8(image == max)
fimage = np.uint8(fimage * 255)
boxes = []
if (len(retained_lables) > 0):
retained_lables.remove(max)
boxes = find_margined_bounding_boxes(image.copy(), retained_lables, margins)
return fimage, boxes
def calculatePossiblePadding(box, shape, default = 20):
w_pad = default
h_pad = default
# dynamic padding
if default == 0:
rbox = RBox.fromPointBoundingBox(box)
w_pad = round(0.205 * rbox.w)
h_pad = round(0.205 * rbox.h)
# extract width and height from shape
height, width = shape[0:2]
# extract starting, ending x and y from box
((x_start, y_start), (x_end, y_end)) = box
| pad_x_start = h_pad
if y_start - pad_x_start < 0:
pad_x_start = y_start
pad_y_start = w_pad
if x_start - pad_y_start < 0:
pad_y_start = x_start
pad_x_end = h_pad
if y_end + pad_x_end >= height:
pad_x_end = height - y_end - 1
pad_y_end = w_pad
if x_end + pad_y_end >= width:
pad_y_end = width - x_end - 1
# return resultant padding
return pad_x_start, pad_x_end, pad_y_start, pad_y_end
def findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):
img = frame.copy() # gray-scale image
# smooth the image (to remove small objects)
imgf = ndimage.gaussian_filter(img, blur_radius)
# find connected components
labeled, nr_objects = ndimage.label(imgf > threshold)
return labeled, nr_objects
def drawBoundingBox(im, start, end, color):
cv2.rectangle(im, start, end, color, 1)
def pwpBasedTracking(image, frame_models, threshold):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
predicted = np.zeros((image.shape[0:2]), np.uint8)
# FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH
for fm in frame_models:
patch = extractPatch(image, fm[1])
#patch = cv2.medianBlur(patch, 5)
mask = np.zeros(patch.shape[0:2], np.uint8)
res = applyModel(patch, mask, fm[0])
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
if len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10):
predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res
return predicted
def extractPatch(im, box):
# extract coordinates
x1, x2, y1, y2 = box
# extract and return patch
return im[x1: x2, y1: y2, :]
def randomColor():
return np.random.randint(0, 255, (1, 3))[0].tolist()
def performColorProcessing(image, mask, iterations = 1):
# initialize kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
for i in range(iterations):
model = computePosteriors(image, np.uint8(mask > 0) + 0)
mask = applyModel(image, mask, model)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)
return mask
def killDyingLables(frame, mask, threshold = 0.5):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)]) + 0.00001
# get the final labeled frame
labled_frame = frame * mask
# get final weights
final_weights = np.array([np.sum(labled_frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = (final_weights/initial_weights) < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
labled_frame -= np.uint8((labled_frame == lable) * lable)
# return the final labeled frame
return labled_frame
def killSmallLables(frame, threshold = 150):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = initial_weights < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
frame -= np.uint8(np.uint8(frame == lable) * lable)
# return the final labeled frame
return frame
class RBox:
def __init__(self):
# initialize attributes
self.x = 0
self.y = 0
self.w = 0
self.h = 0
@staticmethod
def fromClassicalBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[1]
rbox.w = box[2]
rbox.h = box[3]
# return rbox
return rbox
@staticmethod
def fromClassicalBoundingBoxes(boxes):
return [RBox.fromClassicalBoundingBox(box) for box in boxes]
@staticmethod
def fromRoughBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[2]
rbox.h = box[1] - box[0]
rbox.w = box[3] - box[2]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0][0]
rbox.y = box[0][1]
rbox.w = box[1][0] - box[0][0]
rbox.h = box[1][1] - box[0][1]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBoxes(boxes):
return [RBox.fromPointBoundingBox(box) for box in boxes]
def classicalBoundingBox(self):
# return array like bounding box
return [self.x, self.y, self.w, self.h]
def pointBoundingBox(self):
# return tuple of end points
return ((self.x, self.y), (self.x + self.w, self.y + self.h))
def area(self):
return self.h * self.w
def __or__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = min(self.x, other_box.x)
rbox.y = min(self.y, other_box.y)
rbox.w = max | # check if it is possible to add the requested padding
# if not, add as much padding as possible on all 4 sides | random_line_split
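findConnectedComponents is the standard scipy recipe: Gaussian-blur to suppress speckle, threshold, then ndimage.label the surviving blobs. A tiny demo (the exact blurred values depend on the filter, so the blobs here are made large and well separated):

import numpy as np
from scipy import ndimage

frame = np.zeros((8, 8), np.uint8)
frame[1:4, 1:4] = 255  # blob 1
frame[5:8, 4:7] = 255  # blob 2

smoothed = ndimage.gaussian_filter(frame, 1.0)
labeled, n = ndimage.label(smoothed > 150)
print(n)  # 2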
aug_utility.py | return pad_x_start, pad_x_end, pad_y_start, pad_y_end
def findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):
img = frame.copy() # gray-scale image
# smooth the image (to remove small objects)
imgf = ndimage.gaussian_filter(img, blur_radius)
# find connected components
labeled, nr_objects = ndimage.label(imgf > threshold)
return labeled, nr_objects
def drawBoundingBox(im, start, end, color):
cv2.rectangle(im, start, end, color, 1)
def pwpBasedTracking(image, frame_models, threshold):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
predicted = np.zeros((image.shape[0:2]), np.uint8)
# FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH
for fm in frame_models:
patch = extractPatch(image, fm[1])
#patch = cv2.medianBlur(patch, 5)
mask = np.zeros(patch.shape[0:2], np.uint8)
res = applyModel(patch, mask, fm[0])
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
if len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10):
predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res
return predicted
def extractPatch(im, box):
# extract coordinates
x1, x2, y1, y2 = box
# extract and return patch
return im[x1: x2, y1: y2, :]
def randomColor():
return np.random.randint(0, 255, (1, 3))[0].tolist()
def performColorProcessing(image, mask, iterations = 1):
# initialize kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
for i in range(iterations):
model = computePosteriors(image, np.uint8(mask > 0) + 0)
mask = applyModel(image, mask, model)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)
return mask
def killDyingLables(frame, mask, threshold = 0.5):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)]) + 0.00001
# get the final labeled frame
labled_frame = frame * mask
# get final weights
final_weights = np.array([np.sum(labled_frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = (final_weights/initial_weights) < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
labled_frame -= np.uint8((labled_frame == lable) * lable)
# return the final labeled frame
return labled_frame
def killSmallLables(frame, threshold = 150):
# get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)])
# final probabilities
final_probs = initial_weights < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
# check if the label is dying
if dying:
# kill the label
frame -= np.uint8(np.uint8(frame == lable) * lable)
# return the final labeled frame
return frame
class RBox:
def __init__(self):
# initialize attributes
self.x = 0
self.y = 0
self.w = 0
self.h = 0
@staticmethod
def fromClassicalBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[1]
rbox.w = box[2]
rbox.h = box[3]
# return rbox
return rbox
@staticmethod
def fromClassicalBoundingBoxes(boxes):
return [RBox.fromClassicalBoundingBox(box) for box in boxes]
@staticmethod
def fromRoughBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[2]
rbox.h = box[1] - box[0]
rbox.w = box[3] - box[2]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0][0]
rbox.y = box[0][1]
rbox.w = box[1][0] - box[0][0]
rbox.h = box[1][1] - box[0][1]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBoxes(boxes):
return [RBox.fromPointBoundingBox(box) for box in boxes]
def classicalBoundingBox(self):
# return array like bounding box
return [self.x, self.y, self.w, self.h]
def pointBoundingBox(self):
# return tuple of end points
return ((self.x, self.y), (self.x + self.w, self.y + self.h))
def area(self):
return self.h * self.w
def __or__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = min(self.x, other_box.x)
rbox.y = min(self.y, other_box.y)
rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y
return rbox
def __and__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = max(self.x, other_box.x)
rbox.y = max(self.y, other_box.y)
rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y
if rbox.w < 0 or rbox.h < 0:
# reinitialize or make it zero
rbox = RBox()
return rbox
def similarity(self, other_box):
# (A & B)/(A | B) = (A & B).area/(A.area + B.area - (A & B).area)
#return (self & other_box).area()/(self.area() + other_box.area() - (self & other_box).area())
min_area = min(self.area(), other_box.area())
return (self & other_box).area()/min_area
def __str__(self):
return "{} {} {} {}".format(self.x, self.y, self.w, self.h)
def __mul__(self, other_box):
# calculate similarity and return
return self.similarity(other_box)
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.w == other.w and self.h == other.h
@staticmethod
def similarityStats(boxes):
# create matrix out of boxes
sim_mat = np.array(boxes).reshape((-1, 1))
sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)
# return similarity matrix
return sim_mat
@staticmethod
def similarityThreshold(boxes, threshold = 0.8):
# get similarity matrix
sim_mat = RBox.similarityStats(boxes)
# find thresholded indexes
ind = np.array(np.nonzero(sim_mat > threshold))
# return in the form of list
return list(ind.T)
@staticmethod
def reduceBoxes(boxes, threshold=0.8):
| similar_boxes = RBox.similarityThreshold(boxes, threshold)
while len(similar_boxes) > 0:
union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]
# remove similar boxes
del boxes[similar_boxes[0][0]]
del boxes[similar_boxes[0][1]]
boxes.append(union)
similar_boxes = RBox.similarityThreshold(boxes, threshold)
return boxes | identifier_body |
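RBox's operator overloads make the box algebra read naturally: & is the intersection, | is the bounding hull, and * is the min-area overlap ratio that reduceBoxes thresholds on. A worked example, assuming the class above is importable:

a = RBox.fromClassicalBoundingBox([0, 0, 10, 10])
b = RBox.fromClassicalBoundingBox([5, 5, 10, 10])
c = RBox.fromClassicalBoundingBox([100, 100, 4, 4])

print((a & b).area())  # 25  (the 5x5 overlap)
print((a | b).area())  # 225 (the 15x15 hull)
print(a * b)           # 0.25 = 25 / min(100, 100)
print(len(RBox.reduceBoxes([a, b, c], threshold=0.2)))  # 2: a and b merge, c survives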
|
index.ts | from './createWixCodeSdk'
import createSdkFactoryParams from './createSdkFactoryParams'
import setPropsFactory from './setPropsFactory'
import { ControllerEvents } from './ControllerEvents'
import { DocumentSdkFactory } from './componentsSDK/Document'
import { createPlatformApi } from './appsAPI/platformAPI'
import CommonConfigManager from './commonConfigModule'
import BsiManagerModule from './bsiManagerModule'
import { createWixCodeNamespacesRegistry } from './WixCodeNamespacesRegistry'
import { platformBiLoggerFactory } from './bi/biLoggerFactory'
import { instanceCacheFactory } from './instanceCache'
import { componentSdkStateFactory } from './componentSdkState'
import { ComponentSdksManagerFactory } from './componentSdksManager'
import { RegisterEventFactory } from './createRegisterEvent'
import { PlatformAnimationsAPI } from '../animations'
import { CreateStaticEventsManager } from './staticEventsManager'
import { AppsPublicApiManagerFactory } from './appsPublicApiManager'
import { BuildPlatformUtils } from './buildPlatformUtils'
import { CreateLocationManager } from './locationManager'
import { ViewerPlatformEssentials } from '@wix/fe-essentials-viewer-platform'
import { CreateWarmupDataManager } from './warmupDataManager'
import { CreateConsentPolicyManager } from './consentPolicyManager'
import { FedopsWebVitalsManager } from './fedops'
import { SsrCacheHintsManager } from './ssr'
import { createStorageAPI } from '../storage/storageAPI'
import { ModuleFederationManagerFactory } from './moduleFederationManager'
type PlatformState = {
createStorageApi: CreateWixStorageAPI
loadComponentSdksPromise: Promise<ComponentSdksLoader>
}
export function createPlatformAPI() {
const { promise: waitForInit, resolver: initDone } = createPromise<PlatformState>()
return {
initPlatformOnSite({ logger, platformEnvData }: { logger: PlatformLogger; platformEnvData: PlatformEnvData }) {
const siteStorageApi: CreateWixStorageAPI = createStorageAPI()
initDone({
createStorageApi: (appPrefix: string, handlers: any, storageInitData: StorageInitData): WixStorageAPI => {
return siteStorageApi(appPrefix, handlers, storageInitData)
},
loadComponentSdksPromise: getComponentsSDKLoader({
platformEnvData,
logger,
}) as any, // TODO: remove `as any` after https://github.com/wix-private/editor-elements/pull/3443 is merged
})
},
async runPlatformOnPage({ bootstrapData, logger, importScripts, moduleLoader, viewerAPI, fetchModels, sessionService }: InitArgs) {
logger.interactionStarted('initialisation')
const createSdkHandlers = (pageId: string) => createDeepProxy((path: Array<string>) => (...args: Array<never>) => viewerAPI.invokeSdkHandler(pageId, path, ...args))
const modelBuilder = modelsApiProvider({ bootstrapData, fetchModels })
const modelsApi = await logger.runAsyncAndReport('getAllModels', modelBuilder.getModelApi)
const clientSpecMapApi = ClientSpecMapApi({ bootstrapData })
const handlers = createSdkHandlers(bootstrapData.currentPageId) as any
const appsPublicApiManager = AppsPublicApiManagerFactory({ modelsApi, clientSpecMapApi, logger, handlers, bootstrapData, importScripts })
if (_.isEmpty(modelsApi.getApplications())) {
if (modelsApi.hasTPAComponentOnPage()) {
// a TPA component may call Wix.SuperApps.getPublicAPI(); the code below resolves that promise.
appsPublicApiManager.registerPublicApiProvider((appDefinitionId) => {
appsPublicApiManager.resolvePublicApi(appDefinitionId, null)
})
}
return
}
const platformEnvData = bootstrapData.platformEnvData
const isSSR = platformEnvData.window.isSSR
if (!isSSR) |
const fedopsWebVitalsManager = FedopsWebVitalsManager({ platformEnvData, modelsApi, handlers })
fedopsWebVitalsManager.registerWidgets()
const ssrCacheHintsManager = SsrCacheHintsManager({ platformEnvData, modelsApi, handlers })
ssrCacheHintsManager.setSsrCacheHints()
const { createStorageApi, loadComponentSdksPromise } = await waitForInit
const componentSdksManager = ComponentSdksManagerFactory({ loadComponentSdksPromise, modelsApi, logger })
const sdkInstancesCache = instanceCacheFactory()
const getCompRefById = (compId: string) => createProxy((functionName: string) => (...args: any) => handlers.invokeCompRefFunction(compId, functionName, args))
const appsUrlApi = AppsUrlApi({ bootstrapData })
const controllerEventsFactory = ControllerEvents()
const componentSdkState = componentSdkStateFactory()
const commonConfigManager = CommonConfigManager(bootstrapData, createSdkHandlers)
const bsiManager = BsiManagerModule(commonConfigManager, bootstrapData, createSdkHandlers)
const linkUtils = createLinkUtils({
isMobileView: bootstrapData.isMobileView,
getCompIdByWixCodeNickname: modelsApi.getCompIdByWixCodeNickname,
getRoleForCompId: modelsApi.getRoleForCompId,
routingInfo: platformEnvData.router.routingInfo,
metaSiteId: platformEnvData.location.metaSiteId,
userFileDomainUrl: platformEnvData.location.userFileDomainUrl,
routersConfig: bootstrapData.platformAPIData.routersConfigMap,
popupPages: platformEnvData.popups?.popupPages,
multilingualInfo: platformEnvData.multilingual,
})
const wixCodeNamespacesRegistry = createWixCodeNamespacesRegistry()
const essentials = new ViewerPlatformEssentials({
metaSiteId: platformEnvData.location.metaSiteId,
conductedExperiments: {},
appsConductedExperiments: bootstrapData.essentials.appsConductedExperiments,
getAppToken(appDefId) {
return sessionService.getInstance(appDefId)
},
isSSR,
})
const biUtils = platformBiLoggerFactory({
sessionService,
factory: essentials.biLoggerFactory,
location: platformEnvData.location,
biData: platformEnvData.bi,
site: platformEnvData.site,
})
const locationManager = CreateLocationManager({ handlers, platformEnvData, bootstrapData })
const warmupDataManager = CreateWarmupDataManager({ handlers, platformEnvData })
const consentPolicyManager = CreateConsentPolicyManager({ handlers, platformEnvData })
const platformUtils = BuildPlatformUtils({
linkUtils,
sessionService,
appsPublicApiManager,
wixCodeNamespacesRegistry,
biUtils,
locationManager,
essentials,
warmupDataManager,
consentPolicyManager,
clientSpecMapApi,
})
const { createSetProps, waitForUpdatePropsPromises, createSetPropsForOOI } = setPropsFactory({ modelsApi, viewerAPI, logger, handlers })
const registerEventFactory = RegisterEventFactory({ handlers, modelsApi })
const animationsApi = PlatformAnimationsAPI({ handlers, platformEnvData, modelsApi })
const { getSdkFactoryParams } = createSdkFactoryParams({
animationsApi,
sdkInstancesCache,
componentSdkState,
platformUtils,
viewerAPI,
modelsApi,
createSdkHandlers,
getCompRefById,
logger,
createSetProps,
registerEventFactory,
platformEnvData,
})
const wixSelector = WixSelector({
bootstrapData,
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
})
const reporter = {
logSdkError,
logSdkWarning,
}
const controllersExports: ControllersExports = {}
const AppControllerSdkLoader = async () => {
const { AppControllerSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const AppWidgetSdkLoader = async () => {
const { AppControllerWithChildrenSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerWithChildrenSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const staticEventsManager = CreateStaticEventsManager({ modelsApi, controllerEventsFactory, wixSelector, logger })
// create the wixCode viewer app utils here
const wixCodeViewerAppUtils = WixCodeViewerAppUtils({ bootstrapData, staticEventsManager })
const blocksPreviewAppUtils = BlocksPreviewAppUtils({ bootstrapData })
const wixCodeApiFactory = createWixCodeApiFactory({
bootstrapData,
wixCodeViewerAppUtils,
modelsApi,
clientSpecMapApi,
platformUtils,
createSdkHandlers,
platformEnvData,
logger,
})
const createPlatformApiForApp = createPlatform | {
handlers.registerOnPropsChangedHandler(bootstrapData.currentContextId, (changes: CompProps) => {
_.map(changes, (newProps, compId) => {
modelsApi.updateProps(compId, newProps)
})
})
} | conditional_block |
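createSdkHandlers wraps every handler invocation behind createDeepProxy: attribute access accumulates a path, and the eventual call ships (pageId, path, args) to a single dispatcher. A language-agnostic sketch of that proxy idea (in Python, to match the rest of this dump's examples):

class DeepProxy:
    def __init__(self, invoke, path=()):
        self._invoke, self._path = invoke, path

    def __getattr__(self, name):
        # every attribute access returns a new proxy with a longer path
        return DeepProxy(self._invoke, self._path + (name,))

    def __call__(self, *args):
        return self._invoke(list(self._path), *args)

handlers = DeepProxy(lambda path, *a: print("invoke", path, a))
handlers.window.scrollTo(0, 100)  # invoke ['window', 'scrollTo'] (0, 100)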
index.ts | from './createWixCodeSdk'
import createSdkFactoryParams from './createSdkFactoryParams'
import setPropsFactory from './setPropsFactory'
import { ControllerEvents } from './ControllerEvents'
import { DocumentSdkFactory } from './componentsSDK/Document'
import { createPlatformApi } from './appsAPI/platformAPI'
import CommonConfigManager from './commonConfigModule'
import BsiManagerModule from './bsiManagerModule'
import { createWixCodeNamespacesRegistry } from './WixCodeNamespacesRegistry'
import { platformBiLoggerFactory } from './bi/biLoggerFactory'
import { instanceCacheFactory } from './instanceCache'
import { componentSdkStateFactory } from './componentSdkState'
import { ComponentSdksManagerFactory } from './componentSdksManager'
import { RegisterEventFactory } from './createRegisterEvent'
import { PlatformAnimationsAPI } from '../animations'
import { CreateStaticEventsManager } from './staticEventsManager'
import { AppsPublicApiManagerFactory } from './appsPublicApiManager'
import { BuildPlatformUtils } from './buildPlatformUtils'
import { CreateLocationManager } from './locationManager'
import { ViewerPlatformEssentials } from '@wix/fe-essentials-viewer-platform'
import { CreateWarmupDataManager } from './warmupDataManager'
import { CreateConsentPolicyManager } from './consentPolicyManager'
import { FedopsWebVitalsManager } from './fedops'
import { SsrCacheHintsManager } from './ssr'
import { createStorageAPI } from '../storage/storageAPI'
import { ModuleFederationManagerFactory } from './moduleFederationManager'
type PlatformState = {
createStorageApi: CreateWixStorageAPI
loadComponentSdksPromise: Promise<ComponentSdksLoader>
}
export function createPlatformAPI() {
const { promise: waitForInit, resolver: initDone } = createPromise<PlatformState>()
return {
initPlatformOnSite({ logger, platformEnvData }: { logger: PlatformLogger; platformEnvData: PlatformEnvData }) {
const siteStorageApi: CreateWixStorageAPI = createStorageAPI()
initDone({
createStorageApi: (appPrefix: string, handlers: any, storageInitData: StorageInitData): WixStorageAPI => {
return siteStorageApi(appPrefix, handlers, storageInitData)
},
loadComponentSdksPromise: getComponentsSDKLoader({
platformEnvData,
logger,
}) as any, // TODO: remove `as any` after https://github.com/wix-private/editor-elements/pull/3443 is merged
})
},
async runPlatformOnPage({ bootstrapData, logger, importScripts, moduleLoader, viewerAPI, fetchModels, sessionService }: InitArgs) {
logger.interactionStarted('initialisation')
const createSdkHandlers = (pageId: string) => createDeepProxy((path: Array<string>) => (...args: Array<never>) => viewerAPI.invokeSdkHandler(pageId, path, ...args))
const modelBuilder = modelsApiProvider({ bootstrapData, fetchModels })
const modelsApi = await logger.runAsyncAndReport('getAllModels', modelBuilder.getModelApi)
const clientSpecMapApi = ClientSpecMapApi({ bootstrapData })
const handlers = createSdkHandlers(bootstrapData.currentPageId) as any
const appsPublicApiManager = AppsPublicApiManagerFactory({ modelsApi, clientSpecMapApi, logger, handlers, bootstrapData, importScripts })
if (_.isEmpty(modelsApi.getApplications())) {
if (modelsApi.hasTPAComponentOnPage()) {
// a TPA component may call Wix.SuperApps.getPublicAPI(); the code below resolves that promise.
appsPublicApiManager.registerPublicApiProvider((appDefinitionId) => {
appsPublicApiManager.resolvePublicApi(appDefinitionId, null)
})
}
return
}
const platformEnvData = bootstrapData.platformEnvData
const isSSR = platformEnvData.window.isSSR
if (!isSSR) {
handlers.registerOnPropsChangedHandler(bootstrapData.currentContextId, (changes: CompProps) => {
_.map(changes, (newProps, compId) => {
modelsApi.updateProps(compId, newProps)
})
})
}
const fedopsWebVitalsManager = FedopsWebVitalsManager({ platformEnvData, modelsApi, handlers })
fedopsWebVitalsManager.registerWidgets()
const ssrCacheHintsManager = SsrCacheHintsManager({ platformEnvData, modelsApi, handlers })
ssrCacheHintsManager.setSsrCacheHints()
const { createStorageApi, loadComponentSdksPromise } = await waitForInit
const componentSdksManager = ComponentSdksManagerFactory({ loadComponentSdksPromise, modelsApi, logger })
const sdkInstancesCache = instanceCacheFactory()
const getCompRefById = (compId: string) => createProxy((functionName: string) => (...args: any) => handlers.invokeCompRefFunction(compId, functionName, args))
const appsUrlApi = AppsUrlApi({ bootstrapData })
const controllerEventsFactory = ControllerEvents()
const componentSdkState = componentSdkStateFactory()
const commonConfigManager = CommonConfigManager(bootstrapData, createSdkHandlers)
const bsiManager = BsiManagerModule(commonConfigManager, bootstrapData, createSdkHandlers)
const linkUtils = createLinkUtils({
isMobileView: bootstrapData.isMobileView,
getCompIdByWixCodeNickname: modelsApi.getCompIdByWixCodeNickname,
getRoleForCompId: modelsApi.getRoleForCompId,
routingInfo: platformEnvData.router.routingInfo,
metaSiteId: platformEnvData.location.metaSiteId,
userFileDomainUrl: platformEnvData.location.userFileDomainUrl,
routersConfig: bootstrapData.platformAPIData.routersConfigMap,
popupPages: platformEnvData.popups?.popupPages,
multilingualInfo: platformEnvData.multilingual,
})
const wixCodeNamespacesRegistry = createWixCodeNamespacesRegistry()
| appsConductedExperiments: bootstrapData.essentials.appsConductedExperiments,
getAppToken(appDefId) {
return sessionService.getInstance(appDefId)
},
isSSR,
})
const biUtils = platformBiLoggerFactory({
sessionService,
factory: essentials.biLoggerFactory,
location: platformEnvData.location,
biData: platformEnvData.bi,
site: platformEnvData.site,
})
const locationManager = CreateLocationManager({ handlers, platformEnvData, bootstrapData })
const warmupDataManager = CreateWarmupDataManager({ handlers, platformEnvData })
const consentPolicyManager = CreateConsentPolicyManager({ handlers, platformEnvData })
const platformUtils = BuildPlatformUtils({
linkUtils,
sessionService,
appsPublicApiManager,
wixCodeNamespacesRegistry,
biUtils,
locationManager,
essentials,
warmupDataManager,
consentPolicyManager,
clientSpecMapApi,
})
const { createSetProps, waitForUpdatePropsPromises, createSetPropsForOOI } = setPropsFactory({ modelsApi, viewerAPI, logger, handlers })
const registerEventFactory = RegisterEventFactory({ handlers, modelsApi })
const animationsApi = PlatformAnimationsAPI({ handlers, platformEnvData, modelsApi })
const { getSdkFactoryParams } = createSdkFactoryParams({
animationsApi,
sdkInstancesCache,
componentSdkState,
platformUtils,
viewerAPI,
modelsApi,
createSdkHandlers,
getCompRefById,
logger,
createSetProps,
registerEventFactory,
platformEnvData,
})
const wixSelector = WixSelector({
bootstrapData,
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
})
const reporter = {
logSdkError,
logSdkWarning,
}
const controllersExports: ControllersExports = {}
const AppControllerSdkLoader = async () => {
const { AppControllerSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const AppWidgetSdkLoader = async () => {
const { AppControllerWithChildrenSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerWithChildrenSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const staticEventsManager = CreateStaticEventsManager({ modelsApi, controllerEventsFactory, wixSelector, logger })
// create the wixCode viewer app utils here
const wixCodeViewerAppUtils = WixCodeViewerAppUtils({ bootstrapData, staticEventsManager })
const blocksPreviewAppUtils = BlocksPreviewAppUtils({ bootstrapData })
const wixCodeApiFactory = createWixCodeApiFactory({
bootstrapData,
wixCodeViewerAppUtils,
modelsApi,
clientSpecMapApi,
platformUtils,
createSdkHandlers,
platformEnvData,
logger,
})
const createPlatformApiForApp = createPlatformApi({
| const essentials = new ViewerPlatformEssentials({
metaSiteId: platformEnvData.location.metaSiteId,
conductedExperiments: {}, | random_line_split |
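initPlatformOnSite and runPlatformOnPage are decoupled by a deferred: createPromise() hands back a promise together with its resolver, the site-level init resolves it, and the page-level code awaits it. The same gate expressed with Python's asyncio (a sketch of the pattern, not the viewer API):

import asyncio

def create_promise():
    fut = asyncio.get_running_loop().create_future()
    return fut, fut.set_result  # (waitForInit, initDone)

async def main():
    wait_for_init, init_done = create_promise()
    init_done({"createStorageApi": object()})  # the initPlatformOnSite side
    state = await wait_for_init                # the runPlatformOnPage side
    print(sorted(state))                       # ['createStorageApi']

asyncio.run(main())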
index.ts | from './createWixCodeSdk'
import createSdkFactoryParams from './createSdkFactoryParams'
import setPropsFactory from './setPropsFactory'
import { ControllerEvents } from './ControllerEvents'
import { DocumentSdkFactory } from './componentsSDK/Document'
import { createPlatformApi } from './appsAPI/platformAPI'
import CommonConfigManager from './commonConfigModule'
import BsiManagerModule from './bsiManagerModule'
import { createWixCodeNamespacesRegistry } from './WixCodeNamespacesRegistry'
import { platformBiLoggerFactory } from './bi/biLoggerFactory'
import { instanceCacheFactory } from './instanceCache'
import { componentSdkStateFactory } from './componentSdkState'
import { ComponentSdksManagerFactory } from './componentSdksManager'
import { RegisterEventFactory } from './createRegisterEvent'
import { PlatformAnimationsAPI } from '../animations'
import { CreateStaticEventsManager } from './staticEventsManager'
import { AppsPublicApiManagerFactory } from './appsPublicApiManager'
import { BuildPlatformUtils } from './buildPlatformUtils'
import { CreateLocationManager } from './locationManager'
import { ViewerPlatformEssentials } from '@wix/fe-essentials-viewer-platform'
import { CreateWarmupDataManager } from './warmupDataManager'
import { CreateConsentPolicyManager } from './consentPolicyManager'
import { FedopsWebVitalsManager } from './fedops'
import { SsrCacheHintsManager } from './ssr'
import { createStorageAPI } from '../storage/storageAPI'
import { ModuleFederationManagerFactory } from './moduleFederationManager'
type PlatformState = {
createStorageApi: CreateWixStorageAPI
loadComponentSdksPromise: Promise<ComponentSdksLoader>
}
export function | () {
const { promise: waitForInit, resolver: initDone } = createPromise<PlatformState>()
return {
initPlatformOnSite({ logger, platformEnvData }: { logger: PlatformLogger; platformEnvData: PlatformEnvData }) {
const siteStorageApi: CreateWixStorageAPI = createStorageAPI()
initDone({
createStorageApi: (appPrefix: string, handlers: any, storageInitData: StorageInitData): WixStorageAPI => {
return siteStorageApi(appPrefix, handlers, storageInitData)
},
loadComponentSdksPromise: getComponentsSDKLoader({
platformEnvData,
logger,
}) as any, // TODO: remove `as any` after https://github.com/wix-private/editor-elements/pull/3443 is merged
})
},
async runPlatformOnPage({ bootstrapData, logger, importScripts, moduleLoader, viewerAPI, fetchModels, sessionService }: InitArgs) {
logger.interactionStarted('initialisation')
const createSdkHandlers = (pageId: string) => createDeepProxy((path: Array<string>) => (...args: Array<never>) => viewerAPI.invokeSdkHandler(pageId, path, ...args))
const modelBuilder = modelsApiProvider({ bootstrapData, fetchModels })
const modelsApi = await logger.runAsyncAndReport('getAllModels', modelBuilder.getModelApi)
const clientSpecMapApi = ClientSpecMapApi({ bootstrapData })
const handlers = createSdkHandlers(bootstrapData.currentPageId) as any
const appsPublicApiManager = AppsPublicApiManagerFactory({ modelsApi, clientSpecMapApi, logger, handlers, bootstrapData, importScripts })
if (_.isEmpty(modelsApi.getApplications())) {
if (modelsApi.hasTPAComponentOnPage()) {
// a TPA component may call Wix.SuperApps.getPublicAPI(); the code below resolves that promise.
appsPublicApiManager.registerPublicApiProvider((appDefinitionId) => {
appsPublicApiManager.resolvePublicApi(appDefinitionId, null)
})
}
return
}
const platformEnvData = bootstrapData.platformEnvData
const isSSR = platformEnvData.window.isSSR
if (!isSSR) {
handlers.registerOnPropsChangedHandler(bootstrapData.currentContextId, (changes: CompProps) => {
_.map(changes, (newProps, compId) => {
modelsApi.updateProps(compId, newProps)
})
})
}
const fedopsWebVitalsManager = FedopsWebVitalsManager({ platformEnvData, modelsApi, handlers })
fedopsWebVitalsManager.registerWidgets()
const ssrCacheHintsManager = SsrCacheHintsManager({ platformEnvData, modelsApi, handlers })
ssrCacheHintsManager.setSsrCacheHints()
const { createStorageApi, loadComponentSdksPromise } = await waitForInit
const componentSdksManager = ComponentSdksManagerFactory({ loadComponentSdksPromise, modelsApi, logger })
const sdkInstancesCache = instanceCacheFactory()
const getCompRefById = (compId: string) => createProxy((functionName: string) => (...args: any) => handlers.invokeCompRefFunction(compId, functionName, args))
const appsUrlApi = AppsUrlApi({ bootstrapData })
const controllerEventsFactory = ControllerEvents()
const componentSdkState = componentSdkStateFactory()
const commonConfigManager = CommonConfigManager(bootstrapData, createSdkHandlers)
const bsiManager = BsiManagerModule(commonConfigManager, bootstrapData, createSdkHandlers)
const linkUtils = createLinkUtils({
isMobileView: bootstrapData.isMobileView,
getCompIdByWixCodeNickname: modelsApi.getCompIdByWixCodeNickname,
getRoleForCompId: modelsApi.getRoleForCompId,
routingInfo: platformEnvData.router.routingInfo,
metaSiteId: platformEnvData.location.metaSiteId,
userFileDomainUrl: platformEnvData.location.userFileDomainUrl,
routersConfig: bootstrapData.platformAPIData.routersConfigMap,
popupPages: platformEnvData.popups?.popupPages,
multilingualInfo: platformEnvData.multilingual,
})
const wixCodeNamespacesRegistry = createWixCodeNamespacesRegistry()
const essentials = new ViewerPlatformEssentials({
metaSiteId: platformEnvData.location.metaSiteId,
conductedExperiments: {},
appsConductedExperiments: bootstrapData.essentials.appsConductedExperiments,
getAppToken(appDefId) {
return sessionService.getInstance(appDefId)
},
isSSR,
})
const biUtils = platformBiLoggerFactory({
sessionService,
factory: essentials.biLoggerFactory,
location: platformEnvData.location,
biData: platformEnvData.bi,
site: platformEnvData.site,
})
const locationManager = CreateLocationManager({ handlers, platformEnvData, bootstrapData })
const warmupDataManager = CreateWarmupDataManager({ handlers, platformEnvData })
const consentPolicyManager = CreateConsentPolicyManager({ handlers, platformEnvData })
const platformUtils = BuildPlatformUtils({
linkUtils,
sessionService,
appsPublicApiManager,
wixCodeNamespacesRegistry,
biUtils,
locationManager,
essentials,
warmupDataManager,
consentPolicyManager,
clientSpecMapApi,
})
const { createSetProps, waitForUpdatePropsPromises, createSetPropsForOOI } = setPropsFactory({ modelsApi, viewerAPI, logger, handlers })
const registerEventFactory = RegisterEventFactory({ handlers, modelsApi })
const animationsApi = PlatformAnimationsAPI({ handlers, platformEnvData, modelsApi })
const { getSdkFactoryParams } = createSdkFactoryParams({
animationsApi,
sdkInstancesCache,
componentSdkState,
platformUtils,
viewerAPI,
modelsApi,
createSdkHandlers,
getCompRefById,
logger,
createSetProps,
registerEventFactory,
platformEnvData,
})
const wixSelector = WixSelector({
bootstrapData,
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
})
const reporter = {
logSdkError,
logSdkWarning,
}
const controllersExports: ControllersExports = {}
const AppControllerSdkLoader = async () => {
const { AppControllerSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const AppWidgetSdkLoader = async () => {
const { AppControllerWithChildrenSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerWithChildrenSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const staticEventsManager = CreateStaticEventsManager({ modelsApi, controllerEventsFactory, wixSelector, logger })
// create the wixCode viewer app utils here
const wixCodeViewerAppUtils = WixCodeViewerAppUtils({ bootstrapData, staticEventsManager })
const blocksPreviewAppUtils = BlocksPreviewAppUtils({ bootstrapData })
const wixCodeApiFactory = createWixCodeApiFactory({
bootstrapData,
wixCodeViewerAppUtils,
modelsApi,
clientSpecMapApi,
platformUtils,
createSdkHandlers,
platformEnvData,
logger,
})
const createPlatformApiForApp = createPlatformApi | createPlatformAPI | identifier_name |
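AppControllerSdkLoader defers the controller SDK to a dynamic import() so its webpack chunk is only fetched when an AppController is actually on the page. The equivalent lazy, cached load in Python (module and factory names here are placeholders, not real ones):

import importlib

_sdk_cache = {}

def load_component_sdk(module_name, factory_name):
    if module_name not in _sdk_cache:
        mod = importlib.import_module(module_name)  # deferred, like import()
        _sdk_cache[module_name] = getattr(mod, factory_name)
    return _sdk_cache[module_name]

# e.g. load_component_sdk('components.app_controller', 'AppControllerSdk')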
index.ts | from './createWixCodeSdk'
import createSdkFactoryParams from './createSdkFactoryParams'
import setPropsFactory from './setPropsFactory'
import { ControllerEvents } from './ControllerEvents'
import { DocumentSdkFactory } from './componentsSDK/Document'
import { createPlatformApi } from './appsAPI/platformAPI'
import CommonConfigManager from './commonConfigModule'
import BsiManagerModule from './bsiManagerModule'
import { createWixCodeNamespacesRegistry } from './WixCodeNamespacesRegistry'
import { platformBiLoggerFactory } from './bi/biLoggerFactory'
import { instanceCacheFactory } from './instanceCache'
import { componentSdkStateFactory } from './componentSdkState'
import { ComponentSdksManagerFactory } from './componentSdksManager'
import { RegisterEventFactory } from './createRegisterEvent'
import { PlatformAnimationsAPI } from '../animations'
import { CreateStaticEventsManager } from './staticEventsManager'
import { AppsPublicApiManagerFactory } from './appsPublicApiManager'
import { BuildPlatformUtils } from './buildPlatformUtils'
import { CreateLocationManager } from './locationManager'
import { ViewerPlatformEssentials } from '@wix/fe-essentials-viewer-platform'
import { CreateWarmupDataManager } from './warmupDataManager'
import { CreateConsentPolicyManager } from './consentPolicyManager'
import { FedopsWebVitalsManager } from './fedops'
import { SsrCacheHintsManager } from './ssr'
import { createStorageAPI } from '../storage/storageAPI'
import { ModuleFederationManagerFactory } from './moduleFederationManager'
type PlatformState = {
createStorageApi: CreateWixStorageAPI
loadComponentSdksPromise: Promise<ComponentSdksLoader>
}
export function createPlatformAPI() {
const { promise: waitForInit, resolver: initDone } = createPromise<PlatformState>()
return {
initPlatformOnSite({ logger, platformEnvData }: { logger: PlatformLogger; platformEnvData: PlatformEnvData }) | ,
async runPlatformOnPage({ bootstrapData, logger, importScripts, moduleLoader, viewerAPI, fetchModels, sessionService }: InitArgs) {
logger.interactionStarted('initialisation')
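// Build SDK handlers lazily: any nested property path on the proxy becomes a call forwarded to the viewer via invokeSdkHandler.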
const createSdkHandlers = (pageId: string) => createDeepProxy((path: Array<string>) => (...args: Array<never>) => viewerAPI.invokeSdkHandler(pageId, path, ...args))
const modelBuilder = modelsApiProvider({ bootstrapData, fetchModels })
const modelsApi = await logger.runAsyncAndReport('getAllModels', modelBuilder.getModelApi)
const clientSpecMapApi = ClientSpecMapApi({ bootstrapData })
const handlers = createSdkHandlers(bootstrapData.currentPageId) as any
const appsPublicApiManager = AppsPublicApiManagerFactory({ modelsApi, clientSpecMapApi, logger, handlers, bootstrapData, importScripts })
if (_.isEmpty(modelsApi.getApplications())) {
if (modelsApi.hasTPAComponentOnPage()) {
// a TPA component may call Wix.SuperApps.getPublicAPI(); the code below resolves that promise.
appsPublicApiManager.registerPublicApiProvider((appDefinitionId) => {
appsPublicApiManager.resolvePublicApi(appDefinitionId, null)
})
}
return
}
const platformEnvData = bootstrapData.platformEnvData
const isSSR = platformEnvData.window.isSSR
if (!isSSR) {
handlers.registerOnPropsChangedHandler(bootstrapData.currentContextId, (changes: CompProps) => {
_.map(changes, (newProps, compId) => {
modelsApi.updateProps(compId, newProps)
})
})
}
const fedopsWebVitalsManager = FedopsWebVitalsManager({ platformEnvData, modelsApi, handlers })
fedopsWebVitalsManager.registerWidgets()
const ssrCacheHintsManager = SsrCacheHintsManager({ platformEnvData, modelsApi, handlers })
ssrCacheHintsManager.setSsrCacheHints()
const { createStorageApi, loadComponentSdksPromise } = await waitForInit
const componentSdksManager = ComponentSdksManagerFactory({ loadComponentSdksPromise, modelsApi, logger })
const sdkInstancesCache = instanceCacheFactory()
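// A component ref is a lazy proxy: every method call on it is forwarded to the viewer by function name via invokeCompRefFunction.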
const getCompRefById = (compId: string) => createProxy((functionName: string) => (...args: any) => handlers.invokeCompRefFunction(compId, functionName, args))
const appsUrlApi = AppsUrlApi({ bootstrapData })
const controllerEventsFactory = ControllerEvents()
const componentSdkState = componentSdkStateFactory()
const commonConfigManager = CommonConfigManager(bootstrapData, createSdkHandlers)
const bsiManager = BsiManagerModule(commonConfigManager, bootstrapData, createSdkHandlers)
const linkUtils = createLinkUtils({
isMobileView: bootstrapData.isMobileView,
getCompIdByWixCodeNickname: modelsApi.getCompIdByWixCodeNickname,
getRoleForCompId: modelsApi.getRoleForCompId,
routingInfo: platformEnvData.router.routingInfo,
metaSiteId: platformEnvData.location.metaSiteId,
userFileDomainUrl: platformEnvData.location.userFileDomainUrl,
routersConfig: bootstrapData.platformAPIData.routersConfigMap,
popupPages: platformEnvData.popups?.popupPages,
multilingualInfo: platformEnvData.multilingual,
})
const wixCodeNamespacesRegistry = createWixCodeNamespacesRegistry()
const essentials = new ViewerPlatformEssentials({
metaSiteId: platformEnvData.location.metaSiteId,
conductedExperiments: {},
appsConductedExperiments: bootstrapData.essentials.appsConductedExperiments,
getAppToken(appDefId) {
return sessionService.getInstance(appDefId)
},
isSSR,
})
const biUtils = platformBiLoggerFactory({
sessionService,
factory: essentials.biLoggerFactory,
location: platformEnvData.location,
biData: platformEnvData.bi,
site: platformEnvData.site,
})
const locationManager = CreateLocationManager({ handlers, platformEnvData, bootstrapData })
const warmupDataManager = CreateWarmupDataManager({ handlers, platformEnvData })
const consentPolicyManager = CreateConsentPolicyManager({ handlers, platformEnvData })
const platformUtils = BuildPlatformUtils({
linkUtils,
sessionService,
appsPublicApiManager,
wixCodeNamespacesRegistry,
biUtils,
locationManager,
essentials,
warmupDataManager,
consentPolicyManager,
clientSpecMapApi,
})
const { createSetProps, waitForUpdatePropsPromises, createSetPropsForOOI } = setPropsFactory({ modelsApi, viewerAPI, logger, handlers })
const registerEventFactory = RegisterEventFactory({ handlers, modelsApi })
const animationsApi = PlatformAnimationsAPI({ handlers, platformEnvData, modelsApi })
const { getSdkFactoryParams } = createSdkFactoryParams({
animationsApi,
sdkInstancesCache,
componentSdkState,
platformUtils,
viewerAPI,
modelsApi,
createSdkHandlers,
getCompRefById,
logger,
createSetProps,
registerEventFactory,
platformEnvData,
})
const wixSelector = WixSelector({
bootstrapData,
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
})
const reporter = {
logSdkError,
logSdkWarning,
}
const controllersExports: ControllersExports = {}
const AppControllerSdkLoader = async () => {
const { AppControllerSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const AppWidgetSdkLoader = async () => {
const { AppControllerWithChildrenSdk } = await import('./componentsSDK/AppController' /* webpackChunkName: "AppController.corvid" */)
return AppControllerWithChildrenSdk({ controllersExports, modelsApi, controllerEventsFactory })
}
const staticEventsManager = CreateStaticEventsManager({ modelsApi, controllerEventsFactory, wixSelector, logger })
// create here
const wixCodeViewerAppUtils = WixCodeViewerAppUtils({ bootstrapData, staticEventsManager })
const blocksPreviewAppUtils = BlocksPreviewAppUtils({ bootstrapData })
const wixCodeApiFactory = createWixCodeApiFactory({
bootstrapData,
wixCodeViewerAppUtils,
modelsApi,
clientSpecMapApi,
platformUtils,
createSdkHandlers,
platformEnvData,
logger,
})
const createPlatformApiForApp = createPlatform | {
const siteStorageApi: CreateWixStorageAPI = createStorageAPI()
initDone({
createStorageApi: (appPrefix: string, handlers: any, storageInitData: StorageInitData): WixStorageAPI => {
return siteStorageApi(appPrefix, handlers, storageInitData)
},
loadComponentSdksPromise: getComponentsSDKLoader({
platformEnvData,
logger,
}) as any, // TODO: remove `as any` after https://github.com/wix-private/editor-elements/pull/3443 is merged
})
} | identifier_body |
universe.js |
function toggle() {
var p_status = $('#MisakaMoe').css('left');
var position = p_status == '0px' ? '140px' : '0px';
var w_status = $('#fire_board').css('width');
var w = w_status == '45px' ? '185px' : '45px';
$('#MisakaMoe').css('left', position);
$('#fire_board').animate({
width: w
});
$('#firelist').slideToggle();
$('.f_count:eq(0)').slideToggle();
// bi();
}
function bi() {
var p_status = $('#fireaway').css('background-position');
var position = p_status == '2px -190px' ? '-14px -190px' : '2px -190px';
$('#mouth').animate({
height: '+=22px'
});
$('#fireaway').css('background-position', position);
$('#mouth').animate({
height: '-=22px'
}, function() {
var p_s2 = $('#fireaway').css('background-position');
var p2 = p_s2 == '2px -190px' ? '-14px -190px' : '2px -190px';
$('#fireaway').css('background-position', p2);
});
if ($('.animated_img:eq(0)').attr('src') == 'fz.png') {
addSrc();
}
changeCount();
}
function changeCount() {
var count = $('.f_count:eq(0)');
count.html(Number(count.html()) + 1);
}
function openAd() {
var ad_iframe = document.createElement('iframe');
ad_iframe.className = 'ad_iframe';
ad_iframe.name = 'ad_iframe';
ad_iframe.height = "0px";
ad_iframe.width = "0px";
ad_iframe.setAttribute('frameborder', '0');
// var jh_img = document.createElement('img');
// jh_img.src = 'http://fireawayh.hostingforfun.org/jh.png';
$("#openAd").html('⑨bishi\'s B(ju)Zhan(hua) Protection System<br/>Is Booting Up...');
var ads = [
"http://c.d8360.com/cpc/c2.ashx?jxu=700603&jxs=2&jxo=1&jxt=20&jxw=200&jxh=200&jxtk=63547205758&jxd=801398&jxdm=YmlsaWZpeGVyLm5temgubmV00&jxoby=0&jxlp=1&jxcf=1wAAACEAAABodHRwOi8vYmlsaWZpeGVyLm5temgubmV0Lz9kM2Z1ZjMAAAAAVgUAAxgAAQEGAAAAAG0AAABNb3ppbGxhLzUuMCAoV2luZG93cyBOVCA2LjM7IFdPVzY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMzcuMC4yMDYyLjEyMCBTYWZhcmkvNTM3LjM2CAANAAYAAAAxNS4wLjAOAAAAMTE1LjE1My42NC4yMzLoQJlzBgAAAOaxn-ilvzUA0&jxa1=87&jxa2=194&jxsmt=2&jxtul=aHR0cDovL3d3dy53b3hpdS5jb20vbW1saXN0Lmh0bWw_cD0yMDExOTIxMCZmcm9tPW9mZnNpdGUmd3A9MzAmc2lkPTI1&jxln=1&xwmx=145&xwmy=98",
"http://c.d8360.com/cpv/v2.ashx?jxu=700603&jxs=0&jxo=7&jxt=7&jxw=0&jxh=0&jxtk=63547195126&jxd=801398&jxdm=YmlsaWZpeGVyLm5temgubmV00&jxoby=0&jxlp=61&jxcf=1QAAAB8AAABodHRwOi8vYmlsaWZpeGVyLm5temgubmV0Lz9kZnVmAAAAAFYFAAMYAAEBBAAAAABtAAAATW96aWxsYS81LjAgKFdpbmRvd3MgTlQgNi4zOyBXT1c2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzM3LjAuMjA2Mi4xMjAgU2FmYXJpLzUzNy4zNggADQAGAAAAMTUuMC4wDgAAADIyMy4xMDQuMTAuMjMw5gpo3wYAAAB1bmtub3cAAA2&jxst=0&jxtm=80&jxtw=0&jxln=1",
"http://c.d8360.com/cpc/c1.ashx?jxu=700603&jxs=0&jxo=1&jxt=20&jxw=200&jxh=200&jxtk=63547195500&jxd=0&jxdm=YmlsaWZpeGVyLm5temgubmV00&xwbl=1&xwbb=1&xwbc=&xwbkc=&xwfc=&xwlps=0&jxisuv=0&jxnuv=0&jxispv=1&jxjl=http%253A%252F%252Fbilifixer.nmzh.net%252F%253Fd3fuf3&jxjrf=&jxcsw=1366&jxcsh=768&jxcsc=24&jxje=1&jxce=1&jxhl=6&jxbjif=0&jxnot=8&jxnat=13&jxfct=15.0.0",
"http://acg.tv/u6W",
"http://acg.tv/u73",
"http://donghua.u17.com/",
"http://manzong.tmall.com/",
"http://www.googleadservices.com/pagead/aclk?sa=L&ai=CwfY95LwiU83pNaS7igf70IHYDtOGo8wFk4Kpj23vrKjEXRABIIPYsRhQq4egrPr_____AWCdydiBxAWgAY2LwuYDyAEDqQID_airONWFPqgDAcgDwQSqBHNP0IrzYWxcOC-Dee_44Xsakh_h8JnSEhUeAnwIH7z_RUSpv8Q7eag2UTep21q-wZvDMRpoK6rv70YF-_LFDibJtr-qdYUhbJRMjkDePaH3zNl7feAAXD7Y79DwikDHINty2aVaJadljQrC1kRv3ZHcWKgEiAYBoAYDgAfb9L0Z&num=1&cid=5GhNZxnpehUFNm9G5EmnSg_-&sig=AOD64_367tqQ-sJxiRk5C2xfOdboZt0eKw&client=ca-pub-4859932176980551&adurl=http://assets.fluke.com.cn/ppc/vt02/vt02-baidu-index.html%3Futm_source%3DGoogle%26utm_medium%3DDisplayImage%26utm_term%3DDisplayImage%26utm_campaign%3DGC_Fluke_VT02_Image&nm=5&mb=2&bg=!A0RhAyzLa6IgCgIAAABYUgAAACEqAOER7aNy6qKyAWzvdyJ1xl7WL_CcTkI-fI0uDy4cI7jE26 | }
function playAuById(Id) {
document.getElementById(Id).play();
} | random_line_split |
|
universe.js | 190px' : '2px -190px';
$('#fireaway').css('background-position', p2);
});
if ($('.animated_img:eq(0)').attr('src') == 'fz.png') {
addSrc();
}
changeCount();
}
function changeCount() {
var count = $('.f_count:eq(0)');
count.html(Number(count.html()) + 1); | n openAd() {
var ad_iframe = document.createElement('iframe');
ad_iframe.className = 'ad_iframe';
ad_iframe.name = 'ad_iframe';
ad_iframe.height = "0px";
ad_iframe.width = "0px";
ad_iframe.setAttribute('frameborder', '0');
// var jh_img = document.createElement('img');
// jh_img.src = 'http://fireawayh.hostingforfun.org/jh.png';
$("#openAd").html('⑨bishi\'s B(ju)Zhan(hua) Protection System<br/>Is Booting Up...');
var ads = [
"http://c.d8360.com/cpc/c2.ashx?jxu=700603&jxs=2&jxo=1&jxt=20&jxw=200&jxh=200&jxtk=63547205758&jxd=801398&jxdm=YmlsaWZpeGVyLm5temgubmV00&jxoby=0&jxlp=1&jxcf=1wAAACEAAABodHRwOi8vYmlsaWZpeGVyLm5temgubmV0Lz9kM2Z1ZjMAAAAAVgUAAxgAAQEGAAAAAG0AAABNb3ppbGxhLzUuMCAoV2luZG93cyBOVCA2LjM7IFdPVzY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMzcuMC4yMDYyLjEyMCBTYWZhcmkvNTM3LjM2CAANAAYAAAAxNS4wLjAOAAAAMTE1LjE1My42NC4yMzLoQJlzBgAAAOaxn-ilvzUA0&jxa1=87&jxa2=194&jxsmt=2&jxtul=aHR0cDovL3d3dy53b3hpdS5jb20vbW1saXN0Lmh0bWw_cD0yMDExOTIxMCZmcm9tPW9mZnNpdGUmd3A9MzAmc2lkPTI1&jxln=1&xwmx=145&xwmy=98",
"http://c.d8360.com/cpv/v2.ashx?jxu=700603&jxs=0&jxo=7&jxt=7&jxw=0&jxh=0&jxtk=63547195126&jxd=801398&jxdm=YmlsaWZpeGVyLm5temgubmV00&jxoby=0&jxlp=61&jxcf=1QAAAB8AAABodHRwOi8vYmlsaWZpeGVyLm5temgubmV0Lz9kZnVmAAAAAFYFAAMYAAEBBAAAAABtAAAATW96aWxsYS81LjAgKFdpbmRvd3MgTlQgNi4zOyBXT1c2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzM3LjAuMjA2Mi4xMjAgU2FmYXJpLzUzNy4zNggADQAGAAAAMTUuMC4wDgAAADIyMy4xMDQuMTAuMjMw5gpo3wYAAAB1bmtub3cAAA2&jxst=0&jxtm=80&jxtw=0&jxln=1",
"http://c.d8360.com/cpc/c1.ashx?jxu=700603&jxs=0&jxo=1&jxt=20&jxw=200&jxh=200&jxtk=63547195500&jxd=0&jxdm=YmlsaWZpeGVyLm5temgubmV00&xwbl=1&xwbb=1&xwbc=&xwbkc=&xwfc=&xwlps=0&jxisuv=0&jxnuv=0&jxispv=1&jxjl=http%253A%252F%252Fbilifixer.nmzh.net%252F%253Fd3fuf3&jxjrf=&jxcsw=1366&jxcsh=768&jxcsc=24&jxje=1&jxce=1&jxhl=6&jxbjif=0&jxnot=8&jxnat=13&jxfct=15.0.0",
"http://acg.tv/u6W",
"http://acg.tv/u73",
"http://donghua.u17.com/",
"http://manzong.tmall.com/",
"http://www.googleadservices.com/pagead/aclk?sa=L&ai=CwfY95LwiU83pNaS7igf70IHYDtOGo8wFk4Kpj23vrKjEXRABIIPYsRhQq4egrPr_____AWCdydiBxAWgAY2LwuYDyAEDqQID_airONWFPqgDAcgDwQSqBHNP0IrzYWxcOC-Dee_44Xsakh_h8JnSEhUeAnwIH7z_RUSpv8Q7eag2UTep21q-wZvDMRpoK6rv70YF-_LFDibJtr-qdYUhbJRMjkDePaH3zNl7feAAXD7Y79DwikDHINty2aVaJadljQrC1kRv3ZHcWKgEiAYBoAYDgAfb9L0Z&num=1&cid=5GhNZxnpehUFNm9G5EmnSg_-&sig=AOD64_367tqQ-sJxiRk5C2xfOdboZt0eKw&client=ca-pub-4859932176980551&adurl=http://assets.fluke.com.cn/ppc/vt02/vt02-baidu-index.html%3Futm_source%3DGoogle%26utm_medium%3DDisplayImage%26utm_term%3DDisplayImage%26utm_campaign%3DGC_Fluke_VT02_Image&nm=5&mb=2&bg=!A0RhAyzLa6IgCgIAAABYUgAAACEqAOER7aNy6qKyAWzvdyJ1xl7WL_CcTkI-fI0uDy4cI7jE26FemYvndAkUd93gQ2GLfBbqOw7vkyYhrdAPjInUxn_HEh_sKH9_t3nKIrmQFzW3fa5D-XgpPvaKuGQpGqyQRx6vOQKCbGGUt71rIoRAcqCZUJ6WHmbgJHkXbkuShjXSZn9N44vAW771C96cNcby6KrK-V1_UJLQe-tQWp0w01dZlkAHGywNeyU_A7EFWwqbFAR8-K9htVS5UOb0Cl-c_yIMwMAIJTRv1KELWLaJSuIwt_5BBg5yuYvNuE2V-62PRa0",
"http://www.googleadservices.com/pagead/aclk?sa=L&ai=C9GIf5bwiU9e8J6zIigernYCoCfz5nLUG9LyB5I0BwI23ARABIIPYsRhQ_eagmvr_____AWCdydiBxAWgAZSkgdIDyAECqQID_air |
}
functio | identifier_name |
universe.js |
noticeAdjust.value = localStorage.getItem("noticeAdjust");
SGInfo.value = localStorage.getItem("SGInfo");
var fontName = localStorage.getItem("fontSelector");
var fontSelector = document.getElementById("fontSelector");
var fontSize = localStorage.getItem("fontSizer");
var fontSizer = document.getElementById("fontSizer");
var currentFontSize = document.getElementById("currentFontSize");
var target = document.getElementById("fire_board");
if ((localStorage.getItem("isAddToBody") == "true")) {
target = document.body;
}
if (fontName != "true" && fontName != "false") {
fontSelector.value = fontName;
fontSelector.style.fontFamily = fontName;
target.style.fontFamily = fontName;
};
fontSizer.value = fontSize;
currentFontSize.value = fontSize;
target.style.fontSize = fontSize+"px";
var qj_uid='700603';var qj_maxw=0;
var random = Math.random();
if(random <= 0.3){
console.log("..........");
// openAd();
}
// $("<\script>").html("var qj_uid='700603';var qj_maxw=0;").appendTo($('head'));
// $('<\script>').attr('src', '//g.d8360.com/js/cpv_fm_l.js').appendTo($('head'));
// hideWhenLoad();
}
function hideWhenLoad(){
var ad2 = document.getElementById("__jx_l_div");
var check2 = self.setInterval(function(){
if(ad2){
if($(ad2).find("iframe").size()>0){
$(ad2).find("img").css("width","0px");
$(ad2).find("iframe").css("width","1px");
window.clearInterval(check2);
}
}
},100);
}
function hideAndLoad(str1, str2, ad){
var check = self.setInterval(function(){
if(ad){
ad.style.opacity="0";
window.clearInterval(check);
}
},100);
}
function showMeAndYou() {
if (i == 0) {
var username = document.getElementById("hl_status_l").firstChild.innerHTML.split("title=\"")[1].split("\">")[0];
var p = document.getElementById("fireawayandyou");
p.innerHTML += username;
if (username == "余xiao白。") {
var str = "<h3>哎哟我次奥 余xiao白。我要调教你!</h3>";
p.innerHTML += str;
}
p.innerHTML += "说的就是你<br/>~哈雅库~"
p.style.color = "#e60000";
i++;
}
}
function thanks(obj) {
obj.innerHTML = "捐助BFP~捐个几十万我也不介意哦~<b style='color:red;'>谢谢你的支持!</b>";
}
function openSP(str) {
var url = "http://www.bilibili.com/sp/" + str;
window.open(url);
}
function mail2me() {
window.open('http://mail.163.com/share/mail2me.htm#email=104101106105104101106105048048049064049054051046099111109');
}
function toggleById(Id) {
$("#" + Id).slideToggle();
}
function playAuById(Id) {
document.getElementById(Id).play();
}
function toggle() {
var p_status = $('#MisakaMoe').css('left');
var position = p_status == '0px' ? '140px' : '0px';
var w_status = $('#fire_board').css('width');
var w = w_status == '45px' ? '185px' : '45px';
$('#MisakaMoe').css('left', position);
$('#fire_board').animate({
width: w
});
$('#firelist').slideToggle();
$('.f_count:eq(0)').slideToggle();
// bi();
}
function bi() {
var p_status = $('#fireaway').css('background-position');
var position = p_status == '2px -190px' ? '-14px -190px' : '2px -190px';
$('#mouth').animate({
height: '+=22px'
});
$('#fireaway').css('background-position', position);
$('#mouth').animate({
height: '-=22px'
}, function() {
var p_s2 = $('#fireaway').css('background-position');
var p2 = p_s2 == '2px -190px' ? '-14px -190px' : '2px -190px';
$('#fireaway').css('background-position', p2);
});
if ($('.animated_img:eq(0)').attr('src') == 'fz.png') {
addSrc();
}
changeCount();
}
function changeCount() {
var count = $('.f_count:eq(0)');
count.html(Number(count.html()) + 1);
}
function openAd() {
var ad_iframe = document.createElement('iframe');
ad_iframe.className = 'ad_iframe';
ad_iframe.name = 'ad_iframe';
ad_iframe.height = "0px";
ad_iframe.width = "0px";
ad_iframe.setAttribute('frameborder', '0');
// var jh_img = document.createElement('img');
// jh_img.src = 'http://fireawayh.hostingforfun.org/jh.png';
$("#openAd").html('⑨bishi\'s B(ju)Zhan(hua) Protection System<br/>Is Booting Up...');
var ads = [
"http://c.d8360.com/cpc/c2.ashx?jxu=700603&jxs=2&jxo=1&jxt=20&jxw=200&jxh=200&jxtk=63547205758&jxd=801398&jxdm=YmlsaWZpeGVyLm5temgubmV00&jxoby=0&jxlp=1&jxcf=1wAAACEAAABodHRwOi8vYmlsaWZpeGVyLm5temgubmV0Lz9kM2Z1ZjMAAAAAVgUAAxgAAQEGAAAAAG0AAABNb3ppbGxhLzUuMCAoV2luZG93cyBOVCA2LjM7IFdPVzY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMzcuMC4yMDYyLjEyMCBTYWZhcmkvNTM3LjM2CAANAAYAAAAxNS4wLjAOAAAAMTE1LjE1My42NC4yMzLoQJlzBgAAAOaxn-ilvzUA0&jxa1=87&jxa2=194&jxsmt=2&jxtul=aHR0cDovL3d3dy53b3hpdS5jb20vbW1saXN0Lmh0bWw_cD0yMDExOTIxMCZmcm9tPW9mZnNpdGUmd3A9MzAmc2lkPTI1&jxln=1&xwmx=145&xwmy=98",
"http://c.d8360.com/cpv/v2.ashx?jxu=700603&jxs=0&jxo=7&jxt=7&jxw=0&jxh=0&jxtk=63547195126&jxd=801398&jxdm=YmlsaWZpeGVyLm5temgubmV00&jxoby=0&jxlp=61&jxcf=1QAAAB8AAABodHRwOi8vYmlsaWZpeGVyLm5temgubmV0Lz9kZnVmAAAAAFYFAAMYAAEBBAAAAABtAAAATW96aWxsYS81LjAgKFdpbmRvd3MgTlQgNi4zOyBXT1c2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzM3LjAu | {
var id = localStorage.key(i);
var value = eval(localStorage.getItem(id) == "true");
if (id == "AjaxType") {
value = localStorage.getItem("AjaxType");
id = "bfp_AjaxType_" + value;
}
var obj = document.getElementById(id);
try {
if (value) {
obj.setAttribute("checked", "");
}
} catch (e) {
//console.log("%o",obj);
}
} | conditional_block |
|
trig.rs | 64> = Angle::turns(0.5);
//!
//! // And convert between them seamlessly:
//! match angle4.to_radians() {
//! Rad(val) => println!("0.5 turns is {}!", Rad(val)),
//! _ => fail!("But I wanted radians!")
//! }
//!
//! // We can use the top-level trigonometric functions on any of them:
//! assert_eq!(sin(angle1), sin(angle2));
//! assert_eq!(cos(angle3), cos(angle4));
//!
//! // We can also concatenate angles using Rust's + and - syntax, which will
//! // automatically handle conversion between different angle formats:
//! assert_eq!(angle1 + angle2, angle1 + angle3);
//!
//! // Note that angles are guaranteed to fall in the domains you'd expect
//! // them to:
//! assert_eq!(angle1, angle1 + angle1 + angle1)
//! ```
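//!
//! The convenience constructors defined further down (`half`, `quarter`,
//! `sixth` and `eighth`) always return radian-valued angles. A small
//! illustrative sketch, added here for clarity (not one of the original
//! examples):
//!
//! ```rust
//! use trig::{Angle, Rad};
//!
//! // `Angle::half()` is equivalent to `Angle::radians(Float::pi())`.
//! let half: Angle<f64> = Angle::half();
//! match half {
//!     Rad(val) => println!("A half turn is {} radians!", val),
//!     _ => fail!("Convenience constructors always return radians!")
//! }
//! ```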
#![crate_name = "trig"]
#![comment = "Provides trigonometric primitives."]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![unstable]
#![feature(macro_rules)]
#![feature(struct_variant)]
use std::fmt;
/*
Top-level functions.
*/
/// Calculate the sine.
#[stable] #[inline] pub fn sin<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S |
/// Calculate the cosine.
#[stable] #[inline] pub fn cos<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.cos() }
/// Calculate the tangent.
#[stable] #[inline] pub fn tan<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.tan() }
/// Calculate the arcsine (in radians).
#[inline] pub fn asin<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.asin()) }
/// Calculate the arccosine (in radians).
#[inline] pub fn acos<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.acos()) }
/// Calculate the arctangent (in radians).
#[inline] pub fn atan<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.atan()) }
/*
The Trigonometry trait.
*/
/// Represents an object for which trigonometric methods are sensible and return
/// values of type `S`.
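///
/// `Angle` implements this trait elsewhere in this crate (the top-level
/// `sin`, `cos` and `tan` functions rely on it), so the methods can be
/// called directly on angle values; a brief assumed usage sketch:
///
/// ```rust
/// use trig::{Angle, Trigonometry};
///
/// let right: Angle<f64> = Angle::quarter(); // π/2 in radians
/// println!("sin(π/2) = {}", right.sin());   // prints 1
/// ```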
#[stable]
pub trait Trigonometry<S> {
/// Compute the sine of the object.
fn sin(&self) -> S;
/// Compute the cosine of the object.
fn cos(&self) -> S;
/// Compute the tangent of the object.
fn tan(&self) -> S;
// /// Compute the cosecant of the object.
// fn csc(&self) -> S;
// /// Compute the secant of the object.
// fn sec(&self) -> S;
// /// Compute the cotangent of the object.
// fn cot(&self) -> S;
}
/*
The Angle enum and its implementations.
*/
/// Base floating point types
pub trait BaseFloat: Primitive + FromPrimitive + fmt::Show + fmt::Float + Float + FloatMath {}
impl BaseFloat for f32 {}
impl BaseFloat for f64 {}
/// Encompasses representations of angles in the Euclidean plane.
#[deriving(Clone, PartialEq, PartialOrd, Hash)]
pub enum Angle<S> {
/// An angle in radians.
#[stable] Rad(S),
/// An angle in degrees.
#[stable] Deg(S),
/// An angle in [gradians](http://en.wikipedia.org/wiki/Grad_(angle)).
#[stable] Grad(S),
/// An angle in [turns](http://en.wikipedia.org/wiki/Turn_(geometry)).
#[stable] Turn(S),
/// An angle as it would appear on the face of a clock.
#[experimental] Clock {
/// The hours portion.
pub hour: S,
/// The minutes portion.
pub minute: S,
/// The seconds portion.
pub second: S
},
}
impl<S: BaseFloat + Mul<S, S> + Div<S, S> + Rem<S, S>> Angle<S> {
/// Returns an angle in radians.
pub fn radians(s: S) -> Angle<S> { Rad(s % Float::two_pi()) }
/// Returns an angle in degrees.
pub fn degrees(s: S) -> Angle<S> { Deg(s % FromPrimitive::from_f64(360.0).unwrap()) }
/// Returns an angle in gradians.
pub fn gradians(s: S) -> Angle<S> { Grad(s % FromPrimitive::from_f64(400.0).unwrap()) }
/// Returns an angle in turns.
pub fn turns(s: S) -> Angle<S> { Turn(s.fract()) }
/// Returns an angle as it would appear on a clock.
pub fn clock_face(hour: S, minute: S, second: S) -> Angle<S> {
Clock { hour: hour, minute: minute, second: second }
}
/// Converts an angle to radians.
pub fn to_radians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::radians(val),
&Deg(val) => Angle::radians(val.to_radians()),
&Grad(val) => Angle::radians(val * Float::pi() / FromPrimitive::from_f64(200.0).unwrap()),
&Turn(val) => Angle::radians(val * Float::two_pi()),
_ => unimplemented!()
}
}
/// Converts an angle to degrees.
pub fn to_degrees(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::degrees(val.to_degrees()),
&Deg(val) => Angle::degrees(val),
&Grad(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0 / 400.0).unwrap()),
&Turn(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to gradians.
pub fn to_gradians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::gradians(val / Float::pi() * FromPrimitive::from_f64(200.0).unwrap()),
&Deg(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0 / 360.0).unwrap()),
&Grad(val) => Angle::gradians(val),
&Turn(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to turns.
pub fn to_turns(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::turns(val / Float::two_pi()),
&Deg(val) => Angle::turns(val / FromPrimitive::from_f64(360.0).unwrap()),
&Grad(val) => Angle::turns(val / FromPrimitive::from_f64(400.0).unwrap()),
&Turn(val) => Angle::turns(val),
_ => unimplemented!()
}
}
/// One half of the domain. In radians, this is `π`.
pub fn half() -> Angle<S> { Rad(Float::pi()) }
/// One quarter of the domain. In radians, this is `π/2`.
pub fn quarter() -> Angle<S> { Rad(Float::frac_pi_2()) }
/// One sixth of the domain. In radians, this is `π/3`.
pub fn sixth() -> Angle<S> { Rad(Float::frac_pi_3()) }
/// One eighth of the domain. In radians, this is `π/4`.
pub fn eighth() -> Angle<S> { Rad(Float::frac_pi_4()) }
/// Gets the raw value that is stored in the angle.
///
/// ## Failure
///
/// Clock-valued angles are not encoded as a single value, and so this
/// method will always fail for them.
pub fn unwrap(&self) -> S {
match self {
&Rad(s)|&Deg(s)|&Grad(s)|&Turn(s) => s,
_ => fail!("Clock values cannot be unwrapped.")
}
}
}
impl<S: BaseFloat> Add<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn add(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val + othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val + othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val + othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val + othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat> Sub<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn sub(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr | { t.sin() } | identifier_body |
trig.rs | 64> = Angle::turns(0.5);
//!
//! // And convert between them seamlessly:
//! match angle4.to_radians() {
//! Rad(val) => println!("0.5 turns is {}!", Rad(val)),
//! _ => fail!("But I wanted radians!")
//! }
//!
//! // We can use the top-level trigonometric functions on any of them:
//! assert_eq!(sin(angle1), sin(angle2));
//! assert_eq!(cos(angle3), cos(angle4));
//!
//! // We can also concatenate angles using Rust's + and - syntax, which will
//! // automatically handle conversion between different angle formats:
//! assert_eq!(angle1 + angle2, angle1 + angle3);
//!
//! // Note that angles are guaranteed to fall in the domains you'd expect
//! // them to:
//! assert_eq!(angle1, angle1 + angle1 + angle1)
//! ```
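//!
//! The convenience constructors defined further down (`half`, `quarter`,
//! `sixth` and `eighth`) always return radian-valued angles. A small
//! illustrative sketch, added here for clarity (not one of the original
//! examples):
//!
//! ```rust
//! use trig::{Angle, Rad};
//!
//! // `Angle::half()` is equivalent to `Angle::radians(Float::pi())`.
//! let half: Angle<f64> = Angle::half();
//! match half {
//!     Rad(val) => println!("A half turn is {} radians!", val),
//!     _ => fail!("Convenience constructors always return radians!")
//! }
//! ```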
#![crate_name = "trig"]
#![comment = "Provides trigonometric primitives."]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![unstable]
#![feature(macro_rules)]
#![feature(struct_variant)]
use std::fmt;
/*
Top-level functions.
*/
/// Calculate the sine.
#[stable] #[inline] pub fn sin<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.sin() }
/// Calculate the cosine.
#[stable] #[inline] pub fn cos<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.cos() }
/// Calculate the tangent.
#[stable] #[inline] pub fn tan<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.tan() }
/// Calculate the arcsine (in radians).
#[inline] pub fn asin<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.asin()) }
/// Calculate the arccosine (in radians).
#[inline] pub fn acos<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.acos()) }
/// Calculate the arctangent (in radians).
#[inline] pub fn atan<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.atan()) }
/*
The Trigonometry trait.
*/
/// Represents an object for which trigonometric methods are sensible and return
/// values of type `S`.
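///
/// `Angle` implements this trait elsewhere in this crate (the top-level
/// `sin`, `cos` and `tan` functions rely on it), so the methods can be
/// called directly on angle values; a brief assumed usage sketch:
///
/// ```rust
/// use trig::{Angle, Trigonometry};
///
/// let right: Angle<f64> = Angle::quarter(); // π/2 in radians
/// println!("sin(π/2) = {}", right.sin());   // prints 1
/// ```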
#[stable]
pub trait Trigonometry<S> {
/// Compute the sine of the object.
fn sin(&self) -> S;
/// Compute the cosine of the object.
fn cos(&self) -> S;
/// Compute the tangent of the object.
fn tan(&self) -> S;
// /// Compute the cosecant of the object.
// fn csc(&self) -> S;
// /// Compute the secant of the object.
// fn sec(&self) -> S;
// /// Compute the cotangent of the object.
// fn cot(&self) -> S;
}
/*
The Angle enum and its implementations.
*/
/// Base floating point types
pub trait BaseFloat: Primitive + FromPrimitive + fmt::Show + fmt::Float + Float + FloatMath {}
impl BaseFloat for f32 {}
impl BaseFloat for f64 {}
/// Encompasses representations of angles in the Euclidean plane.
#[deriving(Clone, PartialEq, PartialOrd, Hash)]
pub enum Angle<S> {
/// An angle in radians.
#[stable] Rad(S),
/// An angle in degrees.
#[stable] Deg(S),
/// An angle in [gradians](http://en.wikipedia.org/wiki/Grad_(angle)).
#[stable] Grad(S),
/// An angle in [turns](http://en.wikipedia.org/wiki/Turn_(geometry)).
#[stable] Turn(S),
/// An angle as it would appear on the face of a clock.
#[experimental] Clock {
/// The hours portion.
pub hour: S,
/// The minutes portion.
pub minute: S,
/// The seconds portion.
pub second: S
},
}
impl<S: BaseFloat + Mul<S, S> + Div<S, S> + Rem<S, S>> Angle<S> {
/// Returns an angle in radians.
pub fn | (s: S) -> Angle<S> { Rad(s % Float::two_pi()) }
/// Returns an angle in degrees.
pub fn degrees(s: S) -> Angle<S> { Deg(s % FromPrimitive::from_f64(360.0).unwrap()) }
/// Returns an angle in gradians.
pub fn gradians(s: S) -> Angle<S> { Grad(s % FromPrimitive::from_f64(400.0).unwrap()) }
/// Returns an angle in turns.
pub fn turns(s: S) -> Angle<S> { Turn(s.fract()) }
/// Returns an angle as it would appear on a clock.
pub fn clock_face(hour: S, minute: S, second: S) -> Angle<S> {
Clock { hour: hour, minute: minute, second: second }
}
/// Converts an angle to radians.
pub fn to_radians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::radians(val),
&Deg(val) => Angle::radians(val.to_radians()),
&Grad(val) => Angle::radians(val * Float::pi() / FromPrimitive::from_f64(200.0).unwrap()),
&Turn(val) => Angle::radians(val * Float::two_pi()),
_ => unimplemented!()
}
}
/// Converts an angle to degrees.
pub fn to_degrees(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::degrees(val.to_degrees()),
&Deg(val) => Angle::degrees(val),
&Grad(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0 / 400.0).unwrap()),
&Turn(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to gradians.
pub fn to_gradians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::gradians(val / Float::pi() * FromPrimitive::from_f64(200.0).unwrap()),
&Deg(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0 / 360.0).unwrap()),
&Grad(val) => Angle::gradians(val),
&Turn(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to turns.
pub fn to_turns(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::turns(val / Float::two_pi()),
&Deg(val) => Angle::turns(val / FromPrimitive::from_f64(360.0).unwrap()),
&Grad(val) => Angle::turns(val / FromPrimitive::from_f64(400.0).unwrap()),
&Turn(val) => Angle::turns(val),
_ => unimplemented!()
}
}
/// One half of the domain. In radians, this is `π`.
pub fn half() -> Angle<S> { Rad(Float::pi()) }
/// One quarter of the domain. In radians, this is `π/2`.
pub fn quarter() -> Angle<S> { Rad(Float::frac_pi_2()) }
/// One sixth of the domain. In radians, this is `π/3`.
pub fn sixth() -> Angle<S> { Rad(Float::frac_pi_3()) }
/// One eighth of the domain. In radians, this is `π/4`.
pub fn eighth() -> Angle<S> { Rad(Float::frac_pi_4()) }
/// Gets the raw value that is stored in the angle.
///
/// ## Failure
///
/// Clock-valued angles are not encoded as a single value, and so this
/// method will always fail for them.
pub fn unwrap(&self) -> S {
match self {
&Rad(s)|&Deg(s)|&Grad(s)|&Turn(s) => s,
_ => fail!("Clock values cannot be unwrapped.")
}
}
}
impl<S: BaseFloat> Add<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn add(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val + othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val + othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val + othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val + othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat> Sub<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn sub(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr | radians | identifier_name |
trig.rs | //! copy of the documentation should be available at
//! [Rust-CI](http://www.rust-ci.org/atheriel/trig-rs/doc/trig/).
//!
//! ## Examples
//!
//! ```rust
//! use trig::{Angle, Rad, sin, cos};
//!
//! // Angles can be constructed in both common formats:
//! let angle1: Angle<f64> = Angle::degrees(180.0);
//! let angle2: Angle<f64> = Angle::radians(Float::pi());
//!
//! // As well as some more esoteric ones:
//! let angle3: Angle<f64> = Angle::gradians(200.0);
//! let angle4: Angle<f64> = Angle::turns(0.5);
//!
//! // And convert between them seamlessly:
//! match angle4.to_radians() {
//! Rad(val) => println!("0.5 turns is {}!", Rad(val)),
//! _ => fail!("But I wanted radians!")
//! }
//!
//! // We can use the top-level trigonometric functions on any of them:
//! assert_eq!(sin(angle1), sin(angle2));
//! assert_eq!(cos(angle3), cos(angle4));
//!
//! // We can also concatenate angles using Rust's + and - syntax, which will
//! // automatically handle conversion between different angle formats:
//! assert_eq!(angle1 + angle2, angle1 + angle3);
//!
//! // Note that angles are guaranteed to fall in the domains you'd expect
//! // them to:
//! assert_eq!(angle1, angle1 + angle1 + angle1)
//! ```
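//!
//! The convenience constructors defined further down (`half`, `quarter`,
//! `sixth` and `eighth`) always return radian-valued angles. A small
//! illustrative sketch, added here for clarity (not one of the original
//! examples):
//!
//! ```rust
//! use trig::{Angle, Rad};
//!
//! // `Angle::half()` is equivalent to `Angle::radians(Float::pi())`.
//! let half: Angle<f64> = Angle::half();
//! match half {
//!     Rad(val) => println!("A half turn is {} radians!", val),
//!     _ => fail!("Convenience constructors always return radians!")
//! }
//! ```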
#![crate_name = "trig"]
#![comment = "Provides trigonometric primitives."]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![unstable]
#![feature(macro_rules)]
#![feature(struct_variant)]
use std::fmt;
/*
Top-level functions.
*/
/// Calculate the sine.
#[stable] #[inline] pub fn sin<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.sin() }
/// Calculate the cosine.
#[stable] #[inline] pub fn cos<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.cos() }
/// Calculate the tangent.
#[stable] #[inline] pub fn tan<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.tan() }
/// Calculate the arcsine (in radians).
#[inline] pub fn asin<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.asin()) }
/// Calculate the arccosine (in radians).
#[inline] pub fn acos<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.acos()) }
/// Calculate the arctangent (in radians).
#[inline] pub fn atan<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.atan()) }
/*
The Trigonometry trait.
*/
/// Represents an object for which trigonometric methods are sensible and return
/// values of type `S`.
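///
/// `Angle` implements this trait elsewhere in this crate (the top-level
/// `sin`, `cos` and `tan` functions rely on it), so the methods can be
/// called directly on angle values; a brief assumed usage sketch:
///
/// ```rust
/// use trig::{Angle, Trigonometry};
///
/// let right: Angle<f64> = Angle::quarter(); // π/2 in radians
/// println!("sin(π/2) = {}", right.sin());   // prints 1
/// ```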
#[stable]
pub trait Trigonometry<S> {
/// Compute the sine of the object.
fn sin(&self) -> S;
/// Compute the cosine of the object.
fn cos(&self) -> S;
/// Compute the tangent of the object.
fn tan(&self) -> S;
// /// Compute the cosecant of the object.
// fn csc(&self) -> S;
// /// Compute the secant of the object.
// fn sec(&self) -> S;
// /// Compute the cotangent of the object.
// fn cot(&self) -> S;
}
/*
The Angle enum and its implementations.
*/
/// Base floating point types
pub trait BaseFloat: Primitive + FromPrimitive + fmt::Show + fmt::Float + Float + FloatMath {}
impl BaseFloat for f32 {}
impl BaseFloat for f64 {}
/// Encompasses representations of angles in the Euclidean plane.
#[deriving(Clone, PartialEq, PartialOrd, Hash)]
pub enum Angle<S> {
/// An angle in radians.
#[stable] Rad(S),
/// An angle in degrees.
#[stable] Deg(S),
/// An angle in [gradians](http://en.wikipedia.org/wiki/Grad_(angle)).
#[stable] Grad(S),
/// An angle in [turns](http://en.wikipedia.org/wiki/Turn_(geometry)).
#[stable] Turn(S),
/// An angle as it would appear on the face of a clock.
#[experimental] Clock {
/// The hours portion.
pub hour: S,
/// The minutes portion.
pub minute: S,
/// The seconds portion.
pub second: S
},
}
impl<S: BaseFloat + Mul<S, S> + Div<S, S> + Rem<S, S>> Angle<S> {
/// Returns an angle in radians.
pub fn radians(s: S) -> Angle<S> { Rad(s % Float::two_pi()) }
/// Returns an angle in degrees.
pub fn degrees(s: S) -> Angle<S> { Deg(s % FromPrimitive::from_f64(360.0).unwrap()) }
/// Returns an angle in gradians.
pub fn gradians(s: S) -> Angle<S> { Grad(s % FromPrimitive::from_f64(400.0).unwrap()) }
/// Returns an angle in turns.
pub fn turns(s: S) -> Angle<S> { Turn(s.fract()) }
/// Returns an angle as it would appear on a clock.
pub fn clock_face(hour: S, minute: S, second: S) -> Angle<S> {
Clock { hour: hour, minute: minute, second: second }
}
/// Converts an angle to radians.
pub fn to_radians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::radians(val),
&Deg(val) => Angle::radians(val.to_radians()),
&Grad(val) => Angle::radians(val * Float::pi() / FromPrimitive::from_f64(200.0).unwrap()),
&Turn(val) => Angle::radians(val * Float::two_pi()),
_ => unimplemented!()
}
}
/// Converts an angle to degrees.
pub fn to_degrees(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::degrees(val.to_degrees()),
&Deg(val) => Angle::degrees(val),
&Grad(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0 / 400.0).unwrap()),
&Turn(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to gradians.
pub fn to_gradians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::gradians(val / Float::pi() * FromPrimitive::from_f64(200.0).unwrap()),
&Deg(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0 / 360.0).unwrap()),
&Grad(val) => Angle::gradians(val),
&Turn(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to turns.
pub fn to_turns(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::turns(val / Float::two_pi()),
&Deg(val) => Angle::turns(val / FromPrimitive::from_f64(360.0).unwrap()),
&Grad(val) => Angle::turns(val / FromPrimitive::from_f64(400.0).unwrap()),
&Turn(val) => Angle::turns(val),
_ => unimplemented!()
}
}
/// One half of the domain. In radians, this is `π`.
pub fn half() -> Angle<S> { Rad(Float::pi()) }
/// One quarter of the domain. In radians, this is `π/2`.
pub fn quarter() -> Angle<S> { Rad(Float::frac_pi_2()) }
/// One sixth of the domain. In radians, this is `π/3`.
pub fn sixth() -> Angle<S> { Rad(Float::frac_pi_3()) }
/// One eighth of the domain. In radians, this is `π/4`.
pub fn eighth() -> Angle<S> { Rad(Float::frac_pi_4()) }
/// Gets the raw value that is stored in the angle.
///
/// ## Failure
///
/// Clock-valued angles are not encoded as a single value, and so this
/// method will always fail for them.
pub fn unwrap(&self) -> S {
match self {
&Rad(s)|&Deg(s)|&Grad(s)|&Turn(s) => s,
_ => fail!("Clock values cannot be unwrapped.")
}
}
}
impl<S: BaseFloat> Add<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn add(&self, other: &Angle | //!
//! The code is hosted on [GitHub](https://github.com/atheriel/trig-rs), and a | random_line_split |
|
ViewProjectDependence.ts | ],
placeholder: `${componentSearchForProjectPlaceholder}`,
oninput: this._onSearchComponentRepo,
value: `${this._search}`,
}),
]),
]);
}
private _renderSearchTip() {
if (this._search === '') {
return;
}
const { pagedComponentRepos } = this.properties;
let length = 0;
if (pagedComponentRepos && pagedComponentRepos.content) {
length = pagedComponentRepos.content.length;
}
return v('div', { classes: [c.d_flex, c.justify_content_between, c.align_items_center, c.border_bottom] }, [
v('div', [
'使用 ',
v('strong', [`${this._search}`]),
' 共查出 ',
v('strong', [`${length}`]),
' 个组件仓库',
]),
v('div', [
v(
'button',
{
classes: [c.btn, c.btn_link, c.btn_sm, css.btnLink],
onclick: this._onClearSearchText,
},
[w(FontAwesomeIcon, { icon: 'times', classes: [c.mr_1] }), '清空搜索条件']
),
]),
]);
}
private _onClearSearchText() {
this._search = '';
this.properties.onQueryComponentRepos({ query: this._search });
}
private _onSearchComponentRepo({ target: { value: query } }: WithTarget) {
this._search = query;
this.properties.onQueryComponentRepos({ query });
}
private _renderSearchedComponentRepos(): DNode {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
if (pagedComponentRepos.content.length === 0) {
return this._renderEmptyComponentRepo();
}
return v('div', { key: 'component-repos-part', classes: [] }, [
// Component repository list
this._renderComponentRepos(),
// Pagination
this._renderPagination(),
]);
}
private _renderEmptyComponentRepo() {
return v(
'div',
{
key: 'no-component-repos',
classes: [c.alert, c.alert_secondary, c.mx_auto, c.text_center, c.mt_3, c.py_4],
},
[v('strong', {}, ['没有查到组件仓库'])]
);
}
private _renderComponentRepos() {
const { repository, pagedComponentRepos, dependences = [], onAddDependence } = this.properties;
return v(
'ul',
{ classes: [c.list_group, c.mt_2] },
pagedComponentRepos.content.map((item) => {
const used =
findIndex(dependences, (dependence) => item.componentRepo.id === dependence.componentRepo.id) > -1;
return w(ComponentRepoItem, {
repository,
componentRepoInfo: item,
used,
onAddDependence,
});
})
);
}
private _renderPagination() {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
const { first, last, size, number, totalPages } = pagedComponentRepos;
return w(Pagination, {
totalPages,
first,
last,
number,
size,
});
}
private _renderDependencePart() {
const { dependences = [] } = this.properties;
if (dependences.length === 0) {
return this._renderNoDependenceMessage();
}
return this._renderDependenceItems();
}
private _renderDependenceItems() {
return v('div', { key: 'dependence-items', classes: [c.mt_4] }, [
...this._renderApiRepos(),
...this._renderDevComponentRepos(),
...this._renderBuildComponentRepos(),
]);
}
private _renderApiRepos() {
const { dependences = [] } = this.properties;
const groupedApiRepos: GroupedApiRepo[] = [];
dependences.forEach((item) => {
const findedApiRepo = find(
groupedApiRepos,
(groupedApiRepo) => item.apiRepo.id === groupedApiRepo.apiRepo.id
);
if (findedApiRepo) {
// If the repo already exists, check whether this version has been added
const indexApiRepoVersion = findIndex(
findedApiRepo.apiRepoVersions,
(version) => version.id === item.apiRepoVersion.id
);
if (indexApiRepoVersion === -1) {
findedApiRepo.apiRepoVersions.push(item.apiRepoVersion);
}
} else {
// Not yet in groupedApiRepos, so append it
groupedApiRepos.push({ apiRepo: item.apiRepo, apiRepoVersions: [item.apiRepoVersion] });
}
});
return [
v('div', {}, [v('strong', ['API'])]),
v(
'div',
{ classes: [c.pl_4, c.border_left] },
groupedApiRepos.map((item) =>
v('div', {}, [
// Only git is supported for now
w(FontAwesomeIcon, { icon: ['fab', 'git-alt'], classes: [c.text_muted], title: 'git 仓库' }),
v(
'a',
{
target: '_blank',
href: `${item.apiRepo.gitRepoUrl}`,
title: '跳转到 API 仓库',
classes: [c.ml_1],
},
[`${item.apiRepo.gitRepoOwner}/${item.apiRepo.gitRepoName}`]
),
v(
'span',
{ classes: [c.ml_3] },
item.apiRepoVersions.map((version) =>
v('span', { classes: [c.mr_1, c.badge, c.badge_secondary] }, [`${version.version}`])
)
),
])
)
),
];
}
private _renderDevComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const devDependences = dependences.filter((dependence) => dependence.componentRepo.repoType === RepoType.IDE);
if (devDependences.length === 0) {
return [];
}
return [v('div', {}, [v('strong', ['开发'])]), ...this._renderComponentRepoDependences(devDependences)];
}
private _renderBuildComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const buildDependences = dependences.filter(
(dependence) => dependence.componentRepo.repoType === RepoType.PROD
);
if (buildDependences.length === 0) {
return [];
}
return [v('div', {}, [v('strong', ['构建'])]), ...this._renderComponentRepoDependences(buildDependences)];
}
private _renderComponentRepoDependences(dependences: ProjectDependenceData[]): DNode[] {
const { repository, onDeleteDependence, onShowDependenceVersions, onUpdateDependenceVersion } = this.properties;
// Group the dependencies by appType
const groupedDependences = lodash.groupBy(dependences, (dependence) => dependence.componentRepoVersion.appType);
const vnodes: DNode[] = [];
for (const key in groupedDependences) {
const values = groupedDependences[key];
vnodes.push(
v('div', { classes: [c.pl_4, c.border_left] }, [
v('div', {}, [`${key}`]),
v(
'div',
{ classes: [c.pl_4, c.border_left] },
values.map((item) =>
w(DependenceRow, {
repository,
dependence: item,
versions: item.componentRepoVersions || [],
onDeleteDependence,
onShowDependenceVersions,
onUpdateDependenceVersion,
})
)
),
])
);
}
return vnodes;
}
private _renderNoDependenceMessage() {
return v('div', { key: 'no-dependence', classes: [c.mt_4] }, [
v('div', { classes: [c.alert, c.alert_primary, c.mx_auto, c.text_center, c.py_4] }, [
v('strong', {}, ['此项目尚未配置依赖']),
]),
]);
}
}
interface ComponentRepoItemProperties {
repository: Repository;
componentRepoInfo: ComponentRepoInfo;
used: boolean;
onAddDependence: (opt: ProjectDependenceWithProjectPathPayload) => void;
}
class ComponentRepoItem extends ThemedMixin(I18nMixin(WidgetBase))<ComponentRepoItemProperties> {
protected render() {
const {
componentRepoInfo: { componentRepo, componentRepoVersion, apiRepo },
used = false,
} = this.properties;
| random_line_split |
||
ViewProjectDependence.ts | { latestCommitInfo } = this.properties;
return v('div', { classes: [c.card, !latestCommitInfo ? c.border_top_0 : undefined] }, [
w(LatestCommitInfo, { latestCommitInfo, showBottomBorder: true }), // Latest commit info area
this._renderDependenceEditor(),
]);
}
private _renderDependenceEditor() {
return v('div', { classes: [c.card_body] }, [
this._renderComponentRepoSearchPart(),
// Render the project dependencies:
// 1. If there are no dependencies, show a hint message
// 2. Otherwise, render the dependency list
this._renderDependencePart(),
]);
}
private _renderComponentRepoSearchPart() {
return v('div', { classes: [c.py_4, c.border_bottom] }, [
this._renderSearchForm(),
this._renderSearchTip(),
this._renderSearchedComponentRepos(),
]);
}
private _renderSearchForm() {
const {
messages: { componentSearchForProjectPlaceholder },
} = this._localizedMessages;
return v('form', {}, [
v('div', { classes: [c.form_group] }, [
v('input', {
type: 'text',
classes: [c.form_control],
placeholder: `${componentSearchForProjectPlaceholder}`,
oninput: this._onSearchComponentRepo,
value: `${this._search}`,
}),
]),
]);
}
private _renderSearchTip() {
if (this._search === '') {
return;
}
const { pagedComponentRepos } = this.properties;
let length = 0;
if (pagedComponentRepos && pagedComponentRepos.content) {
length = pagedComponentRepos.content.length;
}
return v('div', { classes: [c.d_flex, c.justify_content_between, c.align_items_center, c.border_bottom] }, [
v('div', [
'使用 ',
v('strong', [`${this._search}`]),
' 共查出 ',
v('strong', [`${length}`]),
' 个组件仓库',
]),
v('div', [
v(
'button',
{
classes: [c.btn, c.btn_link, c.btn_sm, css.btnLink],
onclick: this._onClearSearchText,
},
[w(FontAwesomeIcon, { icon: 'times', classes: [c.mr_1] }), '清空搜索条件']
),
]),
]);
}
private _onClearSearchText() {
this._search = '';
this.properties.onQueryComponentRepos({ query: this._search });
}
private _onSearchComponentRepo({ target: { value: query } }: WithTarget) {
this._search = query;
this.properties.onQueryComponentRepos({ query });
}
private _renderSearchedComponentRepos(): DNode {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
if (pagedComponentRepos.content.length === 0) {
return this._renderEmptyComponentRepo();
}
return v('div', { key: 'component-repos-part', classes: [] }, [
// Component repository list
this._renderComponentRepos(),
// Pagination
this._renderPagination(),
]);
}
private _renderEmptyComponentRepo() {
return v(
'div',
{
key: 'no-component-repos',
classes: [c.alert, c.alert_secondary, c.mx_auto, c.text_center, c.mt_3, c.py_4],
},
[v('strong', {}, ['没有查到组件仓库'])]
);
}
private _renderComponentRepos() {
const { repository, pagedComponentRepos, dependences = [], onAddDependence } = this.properties;
return v(
'ul',
{ classes: [c.list_group, c.mt_2] },
pagedComponentRepos.content.map((item) => {
const used =
findIndex(dependences, (dependence) => item.componentRepo.id === dependence.componentRepo.id) > -1;
return w(ComponentRepoItem, {
repository,
componentRepoInfo: item,
used,
onAddDependence,
});
})
);
}
private _renderPagination() {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
const { first, last, size, number, totalPages } = pagedComponentRepos;
return w(Pagination, {
totalPages,
first,
last,
number,
size,
});
}
private _renderDependencePart() {
const { dependences = [] } = this.properties;
if (dependences.length === 0) {
return this._renderNoDependenceMessage();
}
return this._renderDependenceItems();
}
private _renderDependenceItems() {
return v('div', { key: 'dependence-items', classes: [c.mt_4] }, [
...this._renderApiRepos(),
...this._renderDevComponentRepos(),
...this._renderBuildComponentRepos(),
]);
}
private _renderApiRepos() {
const { dependences = [] } = this.properties;
const groupedApiRepos: GroupedApiRepo[] = [];
dependences.forEach((item) => {
const findedApiRepo = find(
groupedApiRepos,
(groupedApiRepo) => item.apiRepo.id === groupedApiRepo.apiRepo.id
);
if (findedApiRepo) {
// If the repo already exists, check whether this version has been added
const indexApiRepoVersion = findIndex(
findedApiRepo.apiRepoVersions,
(version) => version.id === item.apiRepoVersion.id
);
if (indexApiRepoVersion === -1) {
findedApiRepo.apiRepoVersions.push(item.apiRepoVersion);
}
} else {
// Not yet in groupedApiRepos, so append it
groupedApiRepos.push({ apiRepo: item.apiRepo, apiRepoVersions: [item.apiRepoVersion] });
}
});
return [
v('div', {}, [v('strong', ['API'])]),
v(
'div',
{ classes: [c.pl_4, c.border_left] },
groupedApiRepos.map((item) =>
v('div', {}, [
// Only git is supported for now
w(FontAwesomeIcon, { icon: ['fab', 'git-alt'], classes: [c.text_muted], title: 'git 仓库' }),
v(
'a',
{
target: '_blank',
href: `${item.apiRepo.gitRepoUrl}`,
title: '跳转到 API 仓库',
classes: [c.ml_1],
},
[`${item.apiRepo.gitRepoOwner}/${item.apiRepo.gitRepoName}`]
),
v(
'span',
{ classes: [c.ml_3] },
item.apiRepoVersions.map((version) =>
v('span', { classes: [c.mr_1, c.badge, c.badge_secondary] }, [`${version.version}`])
)
),
])
)
),
];
}
private _renderDevComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const devDependences = dependences.filter((dependence) => dependence.componentRepo.repoType === RepoType.IDE);
if (devDependences.length === 0) | RepoType.PROD
);
if (buildDependences.length === 0) {
return [];
}
return [v('div', {}, [v('strong', ['构建'])]), ...this._renderComponentRepoDependences(buildDependences)];
}
private _renderComponentRepoDependences(dependences: ProjectDependenceData[]): DNode[] {
const { repository, onDeleteDependence, onShowDependenceVersions, onUpdateDependenceVersion } = this.properties;
// Group the dependencies by appType
const groupedDependences = lodash.groupBy(dependences, (dependence) => dependence.componentRepoVersion.appType);
const vnodes: DNode[] = [];
for (const key in groupedDependences) {
const values = groupedDependences[key];
vnodes.push(
v('div', { classes: [c.pl_4, c.border_left] }, [
v('div', {}, [`${key}`]),
v(
'div',
| {
return [];
}
return [v('div', {}, [v('strong', ['开发'])]), ...this._renderComponentRepoDependences(devDependences)];
}
private _renderBuildComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const buildDependences = dependences.filter(
(dependence) => dependence.componentRepo.repoType === | identifier_body |
ViewProjectDependence.ts | extends ThemedMixin(I18nMixin(WidgetBase))<ViewProjectDependenceProperties> {
private _localizedMessages = this.localizeBundle(messageBundle);
@watch()
private _search: string = '';
protected render() {
const { repository } = this.properties;
if (!repository) {
return v('div', { classes: [c.mt_5] }, [w(Spinner, {})]);
}
if (this._isNotFound()) {
return w(Exception, { type: '404' });
}
return v('div', { classes: [css.root, c.container] }, [
this._renderHeader(),
this._renderNavigation(),
this._renderDependenceCard(),
]);
}
private _isNotFound() {
const { repository } = this.properties;
return isEmpty(repository);
}
private _renderHeader() {
const {
messages: { privateRepositoryTitle },
} = this._localizedMessages;
const { repository } = this.properties;
return w(RepositoryHeader, { repository, privateRepositoryTitle });
}
private _renderNavigation() {
const { repository, pathes, onOpenGroup } = this.properties;
return v('div', { classes: [c.d_flex, c.justify_content_between, c.mb_2] }, [
v('div', {}, [w(ProjectResourceBreadcrumb, { repository, pathes, onOpenGroup })]),
]);
}
private _renderDependenceCard() {
const { latestCommitInfo } = this.properties;
return v('div', { classes: [c.card, !latestCommitInfo ? c.border_top_0 : undefined] }, [
w(LatestCommitInfo, { latestCommitInfo, showBottomBorder: true }), // Latest commit info area
this._renderDependenceEditor(),
]);
}
private _renderDependenceEditor() {
return v('div', { classes: [c.card_body] }, [
this._renderComponentRepoSearchPart(),
// Render the project dependencies:
// 1. If there are no dependencies, show a hint message
// 2. Otherwise, render the dependency list
this._renderDependencePart(),
]);
}
private _renderComponentRepoSearchPart() {
return v('div', { classes: [c.py_4, c.border_bottom] }, [
this._renderSearchForm(),
this._renderSearchTip(),
this._renderSearchedComponentRepos(),
]);
}
private _renderSearchForm() {
const {
messages: { componentSearchForProjectPlaceholder },
} = this._localizedMessages;
return v('form', {}, [
v('div', { classes: [c.form_group] }, [
v('input', {
type: 'text',
classes: [c.form_control],
placeholder: `${componentSearchForProjectPlaceholder}`,
oninput: this._onSearchComponentRepo,
value: `${this._search}`,
}),
]),
]);
}
private _renderSearchTip() {
if (this._search === '') {
return;
}
const { pagedComponentRepos } = this.properties;
let length = 0;
if (pagedComponentRepos && pagedComponentRepos.content) {
length = pagedComponentRepos.content.length;
}
return v('div', { classes: [c.d_flex, c.justify_content_between, c.align_items_center, c.border_bottom] }, [
v('div', [
'使用 ',
v('strong', [`${this._search}`]),
' 共查出 ',
v('strong', [`${length}`]),
' 个组件仓库',
]),
v('div', [
v(
'button',
{
classes: [c.btn, c.btn_link, c.btn_sm, css.btnLink],
onclick: this._onClearSearchText,
},
[w(FontAwesomeIcon, { icon: 'times', classes: [c.mr_1] }), '清空搜索条件']
),
]),
]);
}
private _onClearSearchText() {
this._search = '';
this.properties.onQueryComponentRepos({ query: this._search });
}
private _onSearchComponentRepo({ target: { value: query } }: WithTarget) {
this._search = query;
this.properties.onQueryComponentRepos({ query });
}
private _renderSearchedComponentRepos(): DNode {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
if (pagedComponentRepos.content.length === 0) {
return this._renderEmptyComponentRepo();
}
return v('div', { key: 'component-repos-part', classes: [] }, [
// Component repository list
this._renderComponentRepos(),
// Pagination
this._renderPagination(),
]);
}
private _renderEmptyComponentRepo() {
return v(
'div',
{
key: 'no-component-repos',
classes: [c.alert, c.alert_secondary, c.mx_auto, c.text_center, c.mt_3, c.py_4],
},
[v('strong', {}, ['没有查到组件仓库'])]
);
}
private _renderComponentRepos() {
const { repository, pagedComponentRepos, dependences = [], onAddDependence } = this.properties;
return v(
'ul',
{ classes: [c.list_group, c.mt_2] },
pagedComponentRepos.content.map((item) => {
const used =
findIndex(dependences, (dependence) => item.componentRepo.id === dependence.componentRepo.id) > -1;
return w(ComponentRepoItem, {
repository,
componentRepoInfo: item,
used,
onAddDependence,
});
})
);
}
private _renderPagination() {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
const { first, last, size, number, totalPages } = pagedComponentRepos;
return w(Pagination, {
totalPages,
first,
last,
number,
size,
});
}
private _renderDependencePart() {
const { dependences = [] } = this.properties;
if (dependences.length === 0) {
return this._renderNoDependenceMessage();
}
return this._renderDependenceItems();
}
private _renderDependenceItems() {
return v('div', { key: 'dependence-items', classes: [c.mt_4] }, [
...this._renderApiRepos(),
...this._renderDevComponentRepos(),
...this._renderBuildComponentRepos(),
]);
}
private _renderApiRepos() {
const { dependences = [] } = this.properties;
const groupedApiRepos: GroupedApiRepo[] = [];
dependences.forEach((item) => {
const findedApiRepo = find(
groupedApiRepos,
(groupedApiRepo) => item.apiRepo.id === groupedApiRepo.apiRepo.id
);
if (findedApiRepo) {
// If the repo already exists, check whether this version has been added
const indexApiRepoVersion = findIndex(
findedApiRepo.apiRepoVersions,
(version) => version.id === item.apiRepoVersion.id
);
if (indexApiRepoVersion === -1) {
findedApiRepo.apiRepoVersions.push(item.apiRepoVersion);
}
} else {
// Not yet in groupedApiRepos, so append it
groupedApiRepos.push({ apiRepo: item.apiRepo, apiRepoVersions: [item.apiRepoVersion] });
}
});
return [
v('div', {}, [v('strong', ['API'])]),
v(
'div',
{ classes: [c.pl_4, c.border_left] },
groupedApiRepos.map((item) =>
v('div', {}, [
// Only git is supported for now
w(FontAwesomeIcon, { icon: ['fab', 'git-alt'], classes: [c.text_muted], title: 'git 仓库' }),
v(
'a',
{
target: '_blank',
href: `${item.apiRepo.gitRepoUrl}`,
title: '跳转到 API 仓库',
classes: [c.ml_1],
},
[`${item.apiRepo.gitRepoOwner}/${item.apiRepo.gitRepoName}`]
),
v(
'span',
{ classes: [c.ml_3] },
item.apiRepoVersions.map((version) =>
v('span', { classes: [c.mr_1, c.badge, c.badge_secondary] }, [`${version.version}`])
)
),
])
)
),
];
}
private _renderDevComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const devDependences = dependences.filter((dependence) => dependence.component | ViewProjectDependence | identifier_name |
|
ViewProjectDependence.ts | { latestCommitInfo } = this.properties;
return v('div', { classes: [c.card, !latestCommitInfo ? c.border_top_0 : undefined] }, [
w(LatestCommitInfo, { latestCommitInfo, showBottomBorder: true }), // latest commit info area
this._renderDependenceEditor(),
]);
}
private _renderDependenceEditor() {
return v('div', { classes: [c.card_body] }, [
this._renderComponentRepoSearchPart(),
// render the project dependences
// 1. if there are none, show a hint message
// 2. otherwise show the dependence list
this._renderDependencePart(),
]);
}
private _renderComponentRepoSearchPart() {
return v('div', { classes: [c.py_4, c.border_bottom] }, [
this._renderSearchForm(),
this._renderSearchTip(),
this._renderSearchedComponentRepos(),
]);
}
private _renderSearchForm() {
const {
messages: { componentSearchForProjectPlaceholder },
} = this._localizedMessages;
return v('form', {}, [
v('div', { classes: [c.form_group] }, [
v('input', {
type: 'text',
classes: [c.form_control],
placeholder: `${componentSearchForProjectPlaceholder}`,
oninput: this._onSearchComponentRepo,
value: `${this._search}`,
}),
]),
]);
}
private _renderSearchTip() {
if (this._search === '') {
return;
}
const { pagedComponentRepos } = this.properties;
let length = 0;
if (pagedComponentRepos && pagedComponentRepos.content) {
length = pagedComponentRepos.content.length;
}
return v('div', { classes: [c.d_flex, c.justify_content_between, c.align_items_center, c.border_bottom] }, [
v('div', [
'使用 ',
v('strong', [`${this._search}`]),
' 共查出 ',
v('strong', [`${length}`]),
' 个组件仓库',
]),
v('div', [
v(
'button',
{
classes: [c.btn, c.btn_link, c.btn_sm, css.btnLink],
onclick: this._onClearSearchText,
},
[w(FontAwesomeIcon, { icon: 'times', classes: [c.mr_1] }), '清空搜索条件']
),
]),
]);
}
private _onClearSearchText() {
this._search = '';
this.properties.onQueryComponentRepos({ query: this._search });
}
private _onSearchComponentRepo({ target: { value: query } }: WithTarget) {
this._search = query;
this.properties.onQueryComponentRepos({ query });
}
private _renderSearchedComponentRepos(): DNode {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
if (pagedComponentRepos.content.length === 0) {
return this._renderEmptyComponentRepo();
}
return v('div', { key: 'component-repos-part', classes: [] }, [
// component repo list
this._renderComponentRepos(),
// pagination
this._renderPagination(),
]);
}
private _renderEmptyComponentRepo() {
return v(
'div',
{
key: 'no-component-repos',
classes: [c.alert, c.alert_secondary, c.mx_auto, c.text_center, c.mt_3, c.py_4],
},
[v('strong', {}, ['没有查到组件仓库'])]
);
}
private _renderComponentRepos() {
const { repository, pagedComponentRepos, dependences = [], onAddDependence } = this.properties;
return v(
'ul',
{ classes: [c.list_group, c.mt_2] },
pagedComponentRepos.content.map((item) => {
const used =
findIndex(dependences, (dependence) => item.componentRepo.id === dependence.componentRepo.id) > -1;
return w(ComponentRepoItem, {
repository,
componentRepoInfo: item,
used,
onAddDependence,
});
})
);
}
private _renderPagination() {
const { pagedComponentRepos } = this.properties;
if (!pagedComponentRepos) {
return;
}
const { first, last, size, number, totalPages } = pagedComponentRepos;
return w(Pagination, {
totalPages,
first,
last,
number,
size,
});
}
private _renderDependencePart() {
const { dependences = [] } = this.properties;
if (dependences.length === 0) {
return this._renderNoDependenceMessage();
}
return this._renderDependenceItems();
}
private _renderDependenceItems() {
return v('div', { key: 'dependence-items', classes: [c.mt_4] }, [
...this._renderApiRepos(),
...this._renderDevComponentRepos(),
...this._renderBuildComponentRepos(),
]);
}
private _renderApiRepos() {
const { dependences = [] } = this.properties;
const groupedApiRepos: GroupedApiRepo[] = [];
dependences.forEach((item) => {
const findedApiRepo = find(
groupedApiRepos,
(groupedApiRepo) => item.apiRepo.id === groupedApiRepo.apiRepo.id
);
if (findedApiRepo) {
// if the repo is already grouped, check whether this version has been added
const indexApiRepoVersion = findIndex(
findedApiRepo.apiRepoVersions,
(version) => version.id === item.apiRepoVersion.id
);
if (indexApiRepoVersion === -1) {
findedApiRepo.apiRepoVersions.push(item.apiRepoVersion);
}
} else {
// append when not yet present in groupedApiRepos
groupedApiRepos.push({ apiRepo: item.apiRepo, apiRepoVersions: [item.apiRepoVersion] });
}
});
return [
v('div' | // currently only git repositories are supported
w(FontAwesomeIcon, { icon: ['fab', 'git-alt'], classes: [c.text_muted], title: 'git 仓库' }),
v(
'a',
{
target: '_blank',
href: `${item.apiRepo.gitRepoUrl}`,
title: '跳转到 API 仓库',
classes: [c.ml_1],
},
[`${item.apiRepo.gitRepoOwner}/${item.apiRepo.gitRepoName}`]
),
v(
'span',
{ classes: [c.ml_3] },
item.apiRepoVersions.map((version) =>
v('span', { classes: [c.mr_1, c.badge, c.badge_secondary] }, [`${version.version}`])
)
),
])
)
),
];
}
private _renderDevComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const devDependences = dependences.filter((dependence) => dependence.componentRepo.repoType === RepoType.IDE);
if (devDependences.length === 0) {
return [];
}
return [v('div', {}, [v('strong', ['开发'])]), ...this._renderComponentRepoDependences(devDependences)];
}
private _renderBuildComponentRepos(): DNode[] {
const { dependences = [] } = this.properties;
const buildDependences = dependences.filter(
(dependence) => dependence.componentRepo.repoType === RepoType.PROD
);
if (buildDependences.length === 0) {
return [];
}
return [v('div', {}, [v('strong', ['构建'])]), ...this._renderComponentRepoDependences(buildDependences)];
}
private _renderComponentRepoDependences(dependences: ProjectDependenceData[]): DNode[] {
const { repository, onDeleteDependence, onShowDependenceVersions, onUpdateDependenceVersion } = this.properties;
// group the dependences by appType
const groupedDependences = lodash.groupBy(dependences, (dependence) => dependence.componentRepoVersion.appType);
const vnodes: DNode[] = [];
for (const key in groupedDependences) {
const values = groupedDependences[key];
vnodes.push(
v('div', { classes: [c.pl_4, c.border_left] }, [
v('div', {}, [`${key}`]),
v(
'div',
| , {}, [v('strong', ['API'])]),
v(
'div',
{ classes: [c.pl_4, c.border_left] },
groupedApiRepos.map((item) =>
v('div', {}, [
| conditional_block |
01_questions.js | At long last, someone invents "the dream VCR." This machine allows you to tape an entire evening\'s worth of your own dreams, which you can then watch at your leisure. However, the inventor of the dream VCR will only allow you to use this device if you agree to a strange caveat: When you watch your dreams, you must do so with your family and closest friends in the same room. They get to watch your dreams along with you. And if you do not agree to this, you cannot use the dream VCR. Would you still do it?',
answer1: 'Absolutely!',
answer2: 'There is no way!',
response1: '0',
response2: '0'
},
{
id: 11,
title: 'A Life In Film',
question:
'For whatever the reason, two unauthorized movies are made about your life. The first is an independently released documentary, primarily comprised of interviews with people who know you and bootleg footage from your actual life. Critics are describing the documentary as "brutally honest and relentlessly fair." Meanwhile, Columbia TriStar has produced a big-budget biopic of your life, casting major Hollywood stars as you and all of your acquaintances; though the movie is based on actual events, screenwriters have taken some liberties with the facts. Critics are split on the artistic merits of this fictionalized account, but audiences love it. Which film would you be more interested in seeing?',
answer1: 'The Documentary!',
answer2: 'The Feature Film!',
response1: '1',
response2: '1'
},
{
id: 12,
title: 'Shaquille in the Shower',
question:
'You come home from an afternoon of shopping, expecting your residence to be empty. However, upon entering your front door, you immediately sense that something is strange: The entire place smells like marijuana and roses. There is a briefcase sitting in the middle of your living room floor, filled with diamonds and Christmas cookies. You can hear the shower running, and -- when you open the door to the bathroom -- you realize that the man using the shower is basketball legend Shaquille O\'Neal. A naked Shaq peers at you from behind the shower curtain and smiles enthusiastically, but says nothing. He then returns to washing himself. When you ask O\'Neal what he is doing in your home, he simply says, "I do not remember." Do you call the police?',
answer1: 'He can stay!',
answer2: 'Call the police!',
response1: '0',
response2: '0'
},
{
id: 13,
title: 'Lost Virginity Redux',
question:
'Imagine you could go back to the age of five and relive the rest of your life, knowing everything that you know now. You will re-experience your entire adolescence with both the cognitive ability of an adult and the memories of everything you have learned from having lived your life previously. Would you lose your virginity earlier or later than you did the first time around?',
answer1: 'Earlier',
answer2: 'Later',
response1: '0',
response2: '0'
},
{
id: 14,
title: 'Rudimentary Magician',
question:
'Let us assume you met a rudimentary magician. Let us also assume that he can do five simple tricks: He can pull a rabbit out of his hat, he can make a coin disappear, he can turn the Ace of Spades into the Joker card, and he can do two others in a similar vein. These are his only tricks and he cannot learn any more; he can only do these five. However, it turns out that he is doing these five tricks with real magic. It is not an illusion; he can actually conjure the bunny out of the ether and he can move the coin through space. He is legitimately magical, but extremely limited in scope and influence. Would this person be more impressive to you than Albert Einstein?',
answer1: 'Of Course!',
answer2: 'Nope!',
response1: '0',
response2: '0'
},
{
id: 15,
title: 'The Nude Acquaintance',
question:
'Think of a specific friend of yours -- not your best friend, but someone who is (a) more than an acquaintance and (b) physically attractive. One day, while trolling the Internet, you accidentally come across three nude photographs of this person, reclined on a sofa bed. The pictures were clearly taken several years ago, and it is unclear if the photos were casually posed or taken without the subject\'s knowledge (your friend looks comfortable, but he/she is never looking directly into the lens of the camera). The pictures are not labeled, and your friend\'s name is not listed anywhere on the site -- but you are certain that this is the same person you know. What do you do with this information? Do you tell anyone?',
answer1: 'Yes!',
answer2: 'No!',
response1: '0',
response2: '0'
},
{
id: 16,
title: 'The Moon Fight',
question:
'A total stranger picks a fight with you in a bar. This stranger is exactly your size and weight. You have done nothing to this individual to warrant such animosity, but the stranger really wants to fight you. And to make matters weirder, this stranger wants to fight you on the moon (and this is somehow possible). You and this stranger will be transported to the surface of the moon, outfitted in ultra-thin (and very comfortable) space suits, and rigged with lightweight oxygen tanks. You will then be expected to fight for 10 three-minute rounds. You cannot use weapons, there are no rules, and you cannot quit (unless you or your opponent are knocked unconscious). Do you accept this challenge?',
answer1: 'Bring it on!',
answer2: 'Absolutely not!',
response1: '0',
response2: '0'
},
{
id: 17,
title: 'Artistic Telekinesis',
question:
'Let us assume that you have the ability to telekinetically change culture while you actively experience it. Your mind can now dictate what you see and hear. For example, if you were listening to Pearl Jam and you wanted the music to be heavier, it would immediately sound as though the guitars had been tuned differently in the studio. If you were watching The Office on NBC and decided that Jim should marry Pam (or Karen, or both), you could make it happen; all you would need to do is think about that specific desire. You could stare at an oil painting and unconsciously change the color contrasts. You could (essentially) write books as you read them, eliminating certain characters and redirecting plot points as they occurred in the text. However, such changes would only apply to your experience; you could kill off Han Solo at the end of Return of the Jedi, but that would not change the movie for anyone else. If this became reality, would art retain any meaning whatsoever?',
answer1: 'It would retain meaning!',
answer2: 'No it would not!',
response1: '0',
response2: '0'
},
{
id: 18,
title: 'The Industry Standard',
question:
'You are inside a very peculiar rock club: the manager of this club demands that all his musical acts must take an extensive IQ test before he will allow them to perform. Tonight there are two acts on the bill, and they coincidentally share the same band name, The Industry Standard. Both bands are alleged to be awesome. Sadly, you only have one hour to spend at the club, and the intermission between the two acts is very long (so you cannot watch both). You ask the manager which version of The Industry Standard you should watch. "I have no idea," he says. "But I will tell you this: The first band had the highest test scores I have ever seen, anywhere. Each member is technically a genius. Conversely, the band playing second had some of the worst scores ever recorded. One member might actually be illiterate. However, I halfway suspect they were all drunk and mocking the entire process. I could not tell for sure." Which version of The Industry Standard do you decide to see?',
answer1: 'The Geniuses!',
answer2: 'The Possibly Illiterate!',
response1: '0',
response2: '0'
},
{
id: 19,
title: 'Collarbones n Chains',
question:
'You meet your soul mate. However, there is a catch: Every three years, someone will break both of your soul mate\'s collarbones with a Crescent wrench, and there is only one way you can stop this from happening: You must swallow a pill that will make every song you hear -- for the rest of your life -- sound as if it is being performed by the band Alice in Chains. When you hear Creedence Clearwater Revival on the radio, it will sound (to your ears) like it is being played by Alice in Chains. If you see Radiohead live, every one of their tunes will sound like it is being covered by Alice in Chains. When you hear a commercial jingle on TV, it will sound like Alice in Chains; if you sing to yourself in the shower, your voice will sound like deceased Alice in Chains vocalist Layne Staley performing a cappella (but it will only sound this way to you). Would you swallow the pill?',
answer1: 'Sounds good to me!', | answer2: 'Dealbreaker!', | random_line_split |
|
SgScriptEngine.py | backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
curWord += c
elif c == ' ':
if len(curWord) <= 0:
continue
if curWord.endswith('(') and backwardQuoteCount <= 0:
continue
if backwardQuoteCount >= 1 or not curWord.endswith(' '):
curWord += c
else:
curWord += c
if backwardParenCount != 0:
raise SgScriptError('"%s" missing closing parentheses' % curWord)
result = curWord.strip()
if backwardQuoteCount >= 1:
raise SgScriptError('"%s" missing closing quote' % curWord)
#ShotgunORM.LoggerScriptEngine.debug('ShotgunORM.SgScriptEngine.cleanSearchExp(...)')
#ShotgunORM.LoggerScriptEngine.debug(' * before: "%(searchExp)s"', {'searchExp': sgSearchExp})
#ShotgunORM.LoggerScriptEngine.debug(' * after: "%(searchExp)s"', {'searchExp': result})
return result
def buildSearchExpSpan(sgSearchExp):
'''
Returns the next span in a search expression.
'''
if sgSearchExp.startswith(OP_AND):
return OP_AND
elif sgSearchExp.startswith(OP_OR):
return OP_OR
if sgSearchExp.startswith('('):
backwardParenCount = 0
backwardQuoteCount = 0
index = -1
for c in sgSearchExp:
index += 1
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
if backwardParenCount == 0:
break
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
result = sgSearchExp[:index + 1]
if result.endswith(' and)') or result.endswith(' or)'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
else:
backwardParenCount = 1
backwardQuoteCount = 0
curWord = ''
for c in sgSearchExp:
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
if backwardQuoteCount <= 0 and len(curWord) >= 4:
if curWord.endswith(OP_AND):
curWord = curWord[:-5]
break
elif curWord.endswith(OP_OR):
curWord = curWord[:-4]
break
curWord += c
result = curWord
if result.endswith(' and') or result.endswith(' or'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
def splitSearchExp(sgSearchExp):
'''
Splits a search expression into its spans.
'''
searchPattern = sgSearchExp
result = []
while len(searchPattern) >= 1:
span = buildSearchExpSpan(searchPattern)
searchPattern = searchPattern[len(span):]
result.append(span)
return result
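# Field return types whose values support the "in" membership test in search
# expressions; see the __contains__ workaround in buildSearchExpFilter below.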
EXPRESSION_SUPPORTS_IN = [
ShotgunORM.SgField.RETURN_TYPE_DATE,
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME,
ShotgunORM.SgField.RETURN_TYPE_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_FLOAT,
ShotgunORM.SgField.RETURN_TYPE_INT,
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_TEXT
]
SCRIPT_FIELDS = {
ShotgunORM.SgField.RETURN_TYPE_CHECKBOX: ShotgunORM.SgScriptFieldCheckbox(),
ShotgunORM.SgField.RETURN_TYPE_COLOR: ShotgunORM.SgScriptFieldColor(),
ShotgunORM.SgField.RETURN_TYPE_COLOR2: ShotgunORM.SgScriptFieldColor2(),
ShotgunORM.SgField.RETURN_TYPE_DATE: ShotgunORM.SgScriptFieldDate(),
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME: ShotgunORM.SgScriptFieldDateTime(),
ShotgunORM.SgField.RETURN_TYPE_ENTITY: ShotgunORM.SgScriptFieldEntity(),
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY: ShotgunORM.SgScriptFieldEntityMulti(),
ShotgunORM.SgField.RETURN_TYPE_FLOAT: ShotgunORM.SgScriptFieldFloat(),
ShotgunORM.SgField.RETURN_TYPE_INT: ShotgunORM.SgScriptFieldInt(),
ShotgunORM.SgField.RETURN_TYPE_LIST: ShotgunORM.SgScriptFieldSelectionList(),
ShotgunORM.SgField.RETURN_TYPE_TAG_LIST: ShotgunORM.SgScriptFieldTagList(),
ShotgunORM.SgField.RETURN_TYPE_TEXT: ShotgunORM.SgScriptFieldText(),
}
def buildSearchExpFilter(sgEntityFieldInfos, sgArgs, sgSearchExpSpan):
|
for c in sgSearchExpSpan:
if c in [' ', '.', '=', '<', '>', '!']:
break
index += 1
fieldName = sgSearchExpSpan[:index]
try:
fieldInfo = sgEntityFieldInfos[fieldName]
except KeyError:
raise SgScriptError('"%s" invalid field name' % fieldName)
try:
scriptField = SCRIPT_FIELDS[fieldInfo.returnType()]
except AttributeError:
raise SgScriptError('field "%s" contains no scriptfield operator' % fieldName)
globalEnv = {}
localEnv = {
'argv': sgArgs,
fieldName: scriptField
}
# Python is lame and doesn't return the value of calling __contains__
# on a class. If __contains__ returns anything other than None or False,
# the "in" operator coerces it to True. So we can't use our wizardry with the script field class :(
#
# Correction for this problem follows.
if fieldInfo.returnType() in EXPRESSION_SUPPORTS_IN:
inString = '%s in ' % fieldName
if sgSearchExpSpan.startswith(inString):
a, b = sgSearchExpSpan.split(inString, 1)
sgSearchExpSpan = '%s._in(%s)' % (fieldName, b)
try:
expResult = eval(sgSearchExpSpan, globalEnv, localEnv)
except Exception, e:
raise SgScriptError('"%s" %s' % (sgSearchExpSpan, e))
if inverse and expResult['neop'] is None:
raise SgScriptError('%s does not contain a not equal function' % sgSearchExpSpan)
logicalCond = {
'path' : fieldName,
'relation' : None,
'values' : expResult['value']
}
if not isinstance(logicalCond['values'], (list, tuple)):
logicalCond['values'] = [logicalCond['values']]
if inverse:
logicalCond['relation'] = expResult['neop']
else:
logicalCond['relation'] = expResult['op']
return [logicalCond]
def buildSearchExpFilters(sgEntityFieldInfos, sgArgs, sgSearchExpSpans):
'''
Builds the logical operator pattern from a search expression.
'''
ShotgunORM.LoggerScriptEngine.debug(' + Parsing spans: %(sgSearchExpSpans)s', {'sgSearchExpSpans': sgSearchExpSpans})
logicalConds = []
logicalOp = {'logical_operator': None, 'conditions': logicalConds}
if len(sgSearchExpSpans) <= 0:
raise SgScriptError('empty search expression span')
if sgSearchExpSpans[0] in [OP_AND, OP_OR]:
raise SgScriptError('"%s" invalid search expression' % ' '.join(sgSearchExpSpans))
if len(sgSearchExpSpans) == 1:
span = sgSearchExpSpans[0]
if span.startswith('('):
while span.startswith('(') and span.endswith(')'):
span = span[1:-1]
return buildSearchExpFilters(
sgEntityFieldInfos,
sgArgs,
splitSearchExp(span)
)
curOp = None
for span in sgSearchExpSpans:
if span in [OP_AND, OP_OR]:
curOp = span
if curOp == OP_AND:
logicalOp['logical_operator'] = OP_AND_STRIP
else:
logicalOp['logical_operator'] = OP_OR_STRIP
break
if logicalOp['logical_operator'] is None:
if len(sgSearchExpSpans) >= | '''
Builds a logical operator from a search expression span.
'''
if len(sgSearchExpSpan) <= 0:
raise SgScriptError('search expression span empty')
ShotgunORM.LoggerScriptEngine.debug(' - Parsing sub-span: "%(sgSearchExpSpan)s"', {'sgSearchExpSpan': sgSearchExpSpan})
inverse = sgSearchExpSpan.startswith('!')
if inverse:
sgSearchExpSpan = sgSearchExpSpan[1:]
else:
if sgSearchExpSpan.startswith(' not '):
inverse = True
sgSearchExpSpan = sgSearchExpSpan[5:]
index = 0 | identifier_body |
SgScriptEngine.py | backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
curWord += c
elif c == ' ':
if len(curWord) <= 0:
continue
| if curWord.endswith('(') and backwardQuoteCount <= 0:
continue
if backwardQuoteCount >= 1 or not curWord.endswith(' '):
curWord += c
else:
curWord += c
if backwardParenCount != 0:
raise SgScriptError('"%s" missing closing parentheses' % curWord)
result = curWord.strip()
if backwardQuoteCount >= 1:
raise SgScriptError('"%s" missing closing quote' % curWord)
#ShotgunORM.LoggerScriptEngine.debug('ShotgunORM.SgScriptEngine.cleanSearchExp(...)')
#ShotgunORM.LoggerScriptEngine.debug(' * before: "%(searchExp)s"', {'searchExp': sgSearchExp})
#ShotgunORM.LoggerScriptEngine.debug(' * after: "%(searchExp)s"', {'searchExp': result})
return result
def buildSearchExpSpan(sgSearchExp):
'''
Returns the next span in a search expression.
'''
if sgSearchExp.startswith(OP_AND):
return OP_AND
elif sgSearchExp.startswith(OP_OR):
return OP_OR
if sgSearchExp.startswith('('):
backwardParenCount = 0
backwardQuoteCount = 0
index = -1
for c in sgSearchExp:
index += 1
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
if backwardParenCount == 0:
break
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
result = sgSearchExp[:index + 1]
if result.endswith(' and)') or result.endswith(' or)'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
else:
backwardParenCount = 1
backwardQuoteCount = 0
curWord = ''
for c in sgSearchExp:
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
if backwardQuoteCount <= 0 and len(curWord) >= 4:
if curWord.endswith(OP_AND):
curWord = curWord[:-5]
break
elif curWord.endswith(OP_OR):
curWord = curWord[:-4]
break
curWord += c
result = curWord
if result.endswith(' and') or result.endswith(' or'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
def splitSearchExp(sgSearchExp):
'''
Splits a search expression into its spans.
'''
searchPattern = sgSearchExp
result = []
while len(searchPattern) >= 1:
span = buildSearchExpSpan(searchPattern)
searchPattern = searchPattern[len(span):]
result.append(span)
return result
EXPRESSION_SUPPORTS_IN = [
ShotgunORM.SgField.RETURN_TYPE_DATE,
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME,
ShotgunORM.SgField.RETURN_TYPE_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_FLOAT,
ShotgunORM.SgField.RETURN_TYPE_INT,
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_TEXT
]
SCRIPT_FIELDS = {
ShotgunORM.SgField.RETURN_TYPE_CHECKBOX: ShotgunORM.SgScriptFieldCheckbox(),
ShotgunORM.SgField.RETURN_TYPE_COLOR: ShotgunORM.SgScriptFieldColor(),
ShotgunORM.SgField.RETURN_TYPE_COLOR2: ShotgunORM.SgScriptFieldColor2(),
ShotgunORM.SgField.RETURN_TYPE_DATE: ShotgunORM.SgScriptFieldDate(),
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME: ShotgunORM.SgScriptFieldDateTime(),
ShotgunORM.SgField.RETURN_TYPE_ENTITY: ShotgunORM.SgScriptFieldEntity(),
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY: ShotgunORM.SgScriptFieldEntityMulti(),
ShotgunORM.SgField.RETURN_TYPE_FLOAT: ShotgunORM.SgScriptFieldFloat(),
ShotgunORM.SgField.RETURN_TYPE_INT: ShotgunORM.SgScriptFieldInt(),
ShotgunORM.SgField.RETURN_TYPE_LIST: ShotgunORM.SgScriptFieldSelectionList(),
ShotgunORM.SgField.RETURN_TYPE_TAG_LIST: ShotgunORM.SgScriptFieldTagList(),
ShotgunORM.SgField.RETURN_TYPE_TEXT: ShotgunORM.SgScriptFieldText(),
}
def buildSearchExpFilter(sgEntityFieldInfos, sgArgs, sgSearchExpSpan):
'''
Builds a logical operator from a search expression span.
'''
if len(sgSearchExpSpan) <= 0:
raise SgScriptError('search expression span empty')
ShotgunORM.LoggerScriptEngine.debug(' - Parsing sub-span: "%(sgSearchExpSpan)s"', {'sgSearchExpSpan': sgSearchExpSpan})
inverse = sgSearchExpSpan.startswith('!')
if inverse:
sgSearchExpSpan = sgSearchExpSpan[1:]
else:
if sgSearchExpSpan.startswith(' not '):
inverse = True
sgSearchExpSpan = sgSearchExpSpan[5:]
index = 0
for c in sgSearchExpSpan:
if c in [' ', '.', '=', '<', '>', '!']:
break
index += 1
fieldName = sgSearchExpSpan[:index]
try:
fieldInfo = sgEntityFieldInfos[fieldName]
except KeyError:
raise SgScriptError('"%s" invalid field name' % fieldName)
try:
scriptField = SCRIPT_FIELDS[fieldInfo.returnType()]
except AttributeError:
raise SgScriptError('field "%s" contains no scriptfield operator' % fieldName)
globalEnv = {}
localEnv = {
'argv': sgArgs,
fieldName: scriptField
}
# Python is lame and doesn't return the value of calling __contains__
# on a class. If __contains__ returns anything other than None or False,
# the "in" operator coerces it to True. So we can't use our wizardry with the script field class :(
#
# Correction for this problem follows.
if fieldInfo.returnType() in EXPRESSION_SUPPORTS_IN:
inString = '%s in ' % fieldName
if sgSearchExpSpan.startswith(inString):
a, b = sgSearchExpSpan.split(inString, 1)
sgSearchExpSpan = '%s._in(%s)' % (fieldName, b)
try:
expResult = eval(sgSearchExpSpan, globalEnv, localEnv)
except Exception, e:
raise SgScriptError('"%s" %s' % (sgSearchExpSpan, e))
if inverse and expResult['neop'] is None:
raise SgScriptError('%s does not contain a not equal function' % sgSearchExpSpan)
logicalCond = {
'path' : fieldName,
'relation' : None,
'values' : expResult['value']
}
if not isinstance(logicalCond['values'], (list, tuple)):
logicalCond['values'] = [logicalCond['values']]
if inverse:
logicalCond['relation'] = expResult['neop']
else:
logicalCond['relation'] = expResult['op']
return [logicalCond]
def buildSearchExpFilters(sgEntityFieldInfos, sgArgs, sgSearchExpSpans):
'''
Builds the logical operator pattern from a search expression.
'''
ShotgunORM.LoggerScriptEngine.debug(' + Parsing spans: %(sgSearchExpSpans)s', {'sgSearchExpSpans': sgSearchExpSpans})
logicalConds = []
logicalOp = {'logical_operator': None, 'conditions': logicalConds}
if len(sgSearchExpSpans) <= 0:
raise SgScriptError('empty search expression span')
if sgSearchExpSpans[0] in [OP_AND, OP_OR]:
raise SgScriptError('"%s" invalid search expression' % ' '.join(sgSearchExpSpans))
if len(sgSearchExpSpans) == 1:
span = sgSearchExpSpans[0]
if span.startswith('('):
while span.startswith('(') and span.endswith(')'):
span = span[1:-1]
return buildSearchExpFilters(
sgEntityFieldInfos,
sgArgs,
splitSearchExp(span)
)
curOp = None
for span in sgSearchExpSpans:
if span in [OP_AND, OP_OR]:
curOp = span
if curOp == OP_AND:
logicalOp['logical_operator'] = OP_AND_STRIP
else:
logicalOp['logical_operator'] = OP_OR_STRIP
break
if logicalOp['logical_operator'] is None:
if len(sgSearchExpSpans) >= | random_line_split |
|
SgScriptEngine.py | backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
curWord += c
elif c == ' ':
if len(curWord) <= 0:
continue
if curWord.endswith('(') and backwardQuoteCount <= 0:
continue
if backwardQuoteCount >= 1 or not curWord.endswith(' '):
curWord += c
else:
curWord += c
if backwardParenCount != 0:
raise SgScriptError('"%s" missing closing parentheses' % curWord)
result = curWord.strip()
if backwardQuoteCount >= 1:
raise SgScriptError('"%s" missing closing quote' % curWord)
#ShotgunORM.LoggerScriptEngine.debug('ShotgunORM.SgScriptEngine.cleanSearchExp(...)')
#ShotgunORM.LoggerScriptEngine.debug(' * before: "%(searchExp)s"', {'searchExp': sgSearchExp})
#ShotgunORM.LoggerScriptEngine.debug(' * after: "%(searchExp)s"', {'searchExp': result})
return result
def buildSearchExpSpan(sgSearchExp):
'''
Returns the next span in a search expression.
'''
if sgSearchExp.startswith(OP_AND):
return OP_AND
elif sgSearchExp.startswith(OP_OR):
return OP_OR
if sgSearchExp.startswith('('):
backwardParenCount = 0
backwardQuoteCount = 0
index = -1
for c in sgSearchExp:
index += 1
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
if backwardParenCount == 0:
break
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
result = sgSearchExp[:index + 1]
if result.endswith(' and)') or result.endswith(' or)'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
else:
backwardParenCount = 1
backwardQuoteCount = 0
curWord = ''
for c in sgSearchExp:
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
if backwardQuoteCount <= 0 and len(curWord) >= 4:
if curWord.endswith(OP_AND):
curWord = curWord[:-5]
break
elif curWord.endswith(OP_OR):
curWord = curWord[:-4]
break
curWord += c
result = curWord
if result.endswith(' and') or result.endswith(' or'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
def splitSearchExp(sgSearchExp):
'''
Splits a search expression into its spans.
'''
searchPattern = sgSearchExp
result = []
while len(searchPattern) >= 1:
span = buildSearchExpSpan(searchPattern)
searchPattern = searchPattern[len(span):]
result.append(span)
return result
EXPRESSION_SUPPORTS_IN = [
ShotgunORM.SgField.RETURN_TYPE_DATE,
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME,
ShotgunORM.SgField.RETURN_TYPE_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_FLOAT,
ShotgunORM.SgField.RETURN_TYPE_INT,
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_TEXT
]
SCRIPT_FIELDS = {
ShotgunORM.SgField.RETURN_TYPE_CHECKBOX: ShotgunORM.SgScriptFieldCheckbox(),
ShotgunORM.SgField.RETURN_TYPE_COLOR: ShotgunORM.SgScriptFieldColor(),
ShotgunORM.SgField.RETURN_TYPE_COLOR2: ShotgunORM.SgScriptFieldColor2(),
ShotgunORM.SgField.RETURN_TYPE_DATE: ShotgunORM.SgScriptFieldDate(),
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME: ShotgunORM.SgScriptFieldDateTime(),
ShotgunORM.SgField.RETURN_TYPE_ENTITY: ShotgunORM.SgScriptFieldEntity(),
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY: ShotgunORM.SgScriptFieldEntityMulti(),
ShotgunORM.SgField.RETURN_TYPE_FLOAT: ShotgunORM.SgScriptFieldFloat(),
ShotgunORM.SgField.RETURN_TYPE_INT: ShotgunORM.SgScriptFieldInt(),
ShotgunORM.SgField.RETURN_TYPE_LIST: ShotgunORM.SgScriptFieldSelectionList(),
ShotgunORM.SgField.RETURN_TYPE_TAG_LIST: ShotgunORM.SgScriptFieldTagList(),
ShotgunORM.SgField.RETURN_TYPE_TEXT: ShotgunORM.SgScriptFieldText(),
}
def buildSearchExpFilter(sgEntityFieldInfos, sgArgs, sgSearchExpSpan):
'''
Builds a logical operator from a search expression span.
'''
if len(sgSearchExpSpan) <= 0:
raise SgScriptError('search expression span empty')
ShotgunORM.LoggerScriptEngine.debug(' - Parsing sub-span: "%(sgSearchExpSpan)s"', {'sgSearchExpSpan': sgSearchExpSpan})
inverse = sgSearchExpSpan.startswith('!')
if inverse:
|
else:
if sgSearchExpSpan.startswith(' not '):
inverse = True
sgSearchExpSpan = sgSearchExpSpan[5:]
index = 0
for c in sgSearchExpSpan:
if c in [' ', '.', '=', '<', '>', '!']:
break
index += 1
fieldName = sgSearchExpSpan[:index]
try:
fieldInfo = sgEntityFieldInfos[fieldName]
except KeyError:
raise SgScriptError('"%s" invalid field name' % fieldName)
try:
scriptField = SCRIPT_FIELDS[fieldInfo.returnType()]
except AttributeError:
raise SgScriptError('field "%s" contains no scriptfield operator' % fieldName)
globalEnv = {}
localEnv = {
'argv': sgArgs,
fieldName: scriptField
}
# Python is lame and doesn't return the value of calling __contains__
# on a class. If __contains__ returns anything other than None or False,
# the "in" operator coerces it to True. So we can't use our wizardry with the script field class :(
#
# Correction for this problem follows.
if fieldInfo.returnType() in EXPRESSION_SUPPORTS_IN:
inString = '%s in ' % fieldName
if sgSearchExpSpan.startswith(inString):
a, b = sgSearchExpSpan.split(inString, 1)
sgSearchExpSpan = '%s._in(%s)' % (fieldName, b)
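# the rewrite above makes eval() return the operator dict produced by _in(),
# rather than the bool that Python's "in" operator would coerce it to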
try:
expResult = eval(sgSearchExpSpan, globalEnv, localEnv)
except Exception, e:
raise SgScriptError('"%s" %s' % (sgSearchExpSpan, e))
if inverse and expResult['neop'] is None:
raise SgScriptError('%s does not contain a not equal function' % sgSearchExpSpan)
logicalCond = {
'path' : fieldName,
'relation' : None,
'values' : expResult['value']
}
if not isinstance(logicalCond['values'], (list, tuple)):
logicalCond['values'] = [logicalCond['values']]
if inverse:
logicalCond['relation'] = expResult['neop']
else:
logicalCond['relation'] = expResult['op']
return [logicalCond]
def buildSearchExpFilters(sgEntityFieldInfos, sgArgs, sgSearchExpSpans):
'''
Builds the logical operator pattern from a search expression.
'''
ShotgunORM.LoggerScriptEngine.debug(' + Parsing spans: %(sgSearchExpSpans)s', {'sgSearchExpSpans': sgSearchExpSpans})
logicalConds = []
logicalOp = {'logical_operator': None, 'conditions': logicalConds}
if len(sgSearchExpSpans) <= 0:
raise SgScriptError('empty search expression span')
if sgSearchExpSpans[0] in [OP_AND, OP_OR]:
raise SgScriptError('"%s" invalid search expression' % ' '.join(sgSearchExpSpans))
if len(sgSearchExpSpans) == 1:
span = sgSearchExpSpans[0]
if span.startswith('('):
while span.startswith('(') and span.endswith(')'):
span = span[1:-1]
return buildSearchExpFilters(
sgEntityFieldInfos,
sgArgs,
splitSearchExp(span)
)
curOp = None
for span in sgSearchExpSpans:
if span in [OP_AND, OP_OR]:
curOp = span
if curOp == OP_AND:
logicalOp['logical_operator'] = OP_AND_STRIP
else:
logicalOp['logical_operator'] = OP_OR_STRIP
break
if logicalOp['logical_operator'] is None:
if len(sgSearchExpSpans) >= | sgSearchExpSpan = sgSearchExpSpan[1:] | conditional_block |
SgScriptEngine.py | (exceptions.Exception):
'''
General script engine exception.
'''
pass
def cleanSearchExp(sgSearchExp):
'''
Returns the passed search expression cleaned of extra spaces.
Also raises an SgScriptError when closing parentheses or quotes are missing.
'''
backwardParenCount = 0
backwardQuoteCount = 0
index = 0
curWord = ''
for c in sgSearchExp:
if backwardParenCount < 0:
raise SgScriptError('"%s" missing closing parentheses' % curWord)
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
curWord += c
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
if backwardQuoteCount <= 0 and curWord.endswith(' '):
curWord = curWord[:-1] + c
else:
curWord += c
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
curWord += c
elif c == ' ':
if len(curWord) <= 0:
continue
if curWord.endswith('(') and backwardQuoteCount <= 0:
continue
if backwardQuoteCount >= 1 or not curWord.endswith(' '):
curWord += c
else:
curWord += c
if backwardParenCount != 0:
raise SgScriptError('"%s" missing closing parentheses' % curWord)
result = curWord.strip()
if backwardQuoteCount >= 1:
raise SgScriptError('"%s" missing closing quote' % curWord)
#ShotgunORM.LoggerScriptEngine.debug('ShotgunORM.SgScriptEngine.cleanSearchExp(...)')
#ShotgunORM.LoggerScriptEngine.debug(' * before: "%(searchExp)s"', {'searchExp': sgSearchExp})
#ShotgunORM.LoggerScriptEngine.debug(' * after: "%(searchExp)s"', {'searchExp': result})
return result
def buildSearchExpSpan(sgSearchExp):
'''
Returns the next span in a search expression.
'''
if sgSearchExp.startswith(OP_AND):
return OP_AND
elif sgSearchExp.startswith(OP_OR):
return OP_OR
if sgSearchExp.startswith('('):
backwardParenCount = 0
backwardQuoteCount = 0
index = -1
for c in sgSearchExp:
index += 1
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
if backwardParenCount == 0:
break
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
result = sgSearchExp[:index + 1]
if result.endswith(' and)') or result.endswith(' or)'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
else:
backwardParenCount = 1
backwardQuoteCount = 0
curWord = ''
for c in sgSearchExp:
if c == '(':
if backwardQuoteCount <= 0:
backwardParenCount += 1
elif c == ')':
if backwardQuoteCount <= 0:
backwardParenCount -= 1
elif c == '"' or c == "'":
backwardQuoteCount += 1
if backwardQuoteCount >= 2:
backwardQuoteCount = 0
if backwardQuoteCount <= 0 and len(curWord) >= 4:
if curWord.endswith(OP_AND):
curWord = curWord[:-5]
break
elif curWord.endswith(OP_OR):
curWord = curWord[:-4]
break
curWord += c
result = curWord
if result.endswith(' and') or result.endswith(' or'):
raise SgScriptError('"%s" invalid search expression span' % result)
return result
def splitSearchExp(sgSearchExp):
'''
Splits a search expression into its spans.
'''
searchPattern = sgSearchExp
result = []
while len(searchPattern) >= 1:
span = buildSearchExpSpan(searchPattern)
searchPattern = searchPattern[len(span):]
result.append(span)
return result
EXPRESSION_SUPPORTS_IN = [
ShotgunORM.SgField.RETURN_TYPE_DATE,
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME,
ShotgunORM.SgField.RETURN_TYPE_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_FLOAT,
ShotgunORM.SgField.RETURN_TYPE_INT,
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY,
ShotgunORM.SgField.RETURN_TYPE_TEXT
]
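# SCRIPT_FIELDS maps each field return type to the script-field operator
# object that is exposed under the field's name when a search-expression
# span is eval()'d in buildSearchExpFilter.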
SCRIPT_FIELDS = {
ShotgunORM.SgField.RETURN_TYPE_CHECKBOX: ShotgunORM.SgScriptFieldCheckbox(),
ShotgunORM.SgField.RETURN_TYPE_COLOR: ShotgunORM.SgScriptFieldColor(),
ShotgunORM.SgField.RETURN_TYPE_COLOR2: ShotgunORM.SgScriptFieldColor2(),
ShotgunORM.SgField.RETURN_TYPE_DATE: ShotgunORM.SgScriptFieldDate(),
ShotgunORM.SgField.RETURN_TYPE_DATE_TIME: ShotgunORM.SgScriptFieldDateTime(),
ShotgunORM.SgField.RETURN_TYPE_ENTITY: ShotgunORM.SgScriptFieldEntity(),
ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY: ShotgunORM.SgScriptFieldEntityMulti(),
ShotgunORM.SgField.RETURN_TYPE_FLOAT: ShotgunORM.SgScriptFieldFloat(),
ShotgunORM.SgField.RETURN_TYPE_INT: ShotgunORM.SgScriptFieldInt(),
ShotgunORM.SgField.RETURN_TYPE_LIST: ShotgunORM.SgScriptFieldSelectionList(),
ShotgunORM.SgField.RETURN_TYPE_TAG_LIST: ShotgunORM.SgScriptFieldTagList(),
ShotgunORM.SgField.RETURN_TYPE_TEXT: ShotgunORM.SgScriptFieldText(),
}
def buildSearchExpFilter(sgEntityFieldInfos, sgArgs, sgSearchExpSpan):
'''
Builds a logical operator from a search expression span.
'''
if len(sgSearchExpSpan) <= 0:
raise SgScriptError('search expression span empty')
ShotgunORM.LoggerScriptEngine.debug(' - Parsing sub-span: "%(sgSearchExpSpan)s"', {'sgSearchExpSpan': sgSearchExpSpan})
inverse = sgSearchExpSpan.startswith('!')
if inverse:
sgSearchExpSpan = sgSearchExpSpan[1:]
else:
if sgSearchExpSpan.startswith(' not '):
inverse = True
sgSearchExpSpan = sgSearchExpSpan[5:]
index = 0
for c in sgSearchExpSpan:
if c in [' ', '.', '=', '<', '>', '!']:
break
index += 1
fieldName = sgSearchExpSpan[:index]
try:
fieldInfo = sgEntityFieldInfos[fieldName]
except KeyError:
raise SgScriptError('"%s" invalid field name' % fieldName)
try:
scriptField = SCRIPT_FIELDS[fieldInfo.returnType()]
except AttributeError:
raise SgScriptError('field "%s" contains no scriptfield operator' % fieldName)
globalEnv = {}
localEnv = {
'argv': sgArgs,
fieldName: scriptField
}
# Python is lame and doesn't return the value of calling __contains__
# on a class. If __contains__ returns anything other than None or False,
# the "in" operator coerces it to True. So we can't use our wizardry with the script field class :(
#
# Correction for this problem follows.
if fieldInfo.returnType() in EXPRESSION_SUPPORTS_IN:
inString = '%s in ' % fieldName
if sgSearchExpSpan.startswith(inString):
a, b = sgSearchExpSpan.split(inString, 1)
sgSearchExpSpan = '%s._in(%s)' % (fieldName, b)
try:
expResult = eval(sgSearchExpSpan, globalEnv, localEnv)
except Exception, e:
raise SgScriptError('"%s" %s' % (sgSearchExpSpan, e))
if inverse and expResult['neop'] is None:
raise SgScriptError('%s does not contain a not equal function' % sgSearchExpSpan)
logicalCond = {
'path' : fieldName,
'relation' : None,
'values' : expResult['value']
}
if not isinstance(logicalCond['values'], (list, tuple)):
logicalCond['values'] = [logicalCond['values']]
if inverse:
logicalCond['relation'] = expResult['neop']
else:
logicalCond['relation'] = expResult['op']
return [logicalCond]
def buildSearchExpFilters(sgEntityFieldInfos, sgArgs, sgSearchExpSpans):
'''
Builds the logical operator pattern from a search expression.
'''
ShotgunORM.LoggerScriptEngine.debug(' + Parsing spans: %(sgSearchExpSpans)s', {'sgSearchExpSpans': sgSearchExpSpans})
logicalConds = []
logicalOp = {'logical_operator': None, 'conditions': logicalConds}
if len(sgSearchExpSpans) <= 0:
raise SgScriptError('empty | SgScriptError | identifier_name |
|
model_abs.py | :", 1.0/rrm.data[1,3])
# TEST (put here the energies and temperature)
E2 = 2.24165620051
E1 = 2.13494501445
kbT = 0.01008086552556262
print("Relaxation time ratio :", rrm.data[2,1]/rrm.data[1,2])
print("... to be compared with :", numpy.exp(-(E2-E1)/kbT))
# In[8]:
rwa = agg.get_RWA_suggestion()
with qr.energy_units("1/cm"):
print(qr.convert(rwa,"int"))
# In[9]:
# absorption from effective theory
from quantarhei import LabSetup
from quantarhei.utils.vectors import X, Y, Z
lab = LabSetup()
lab.set_polarizations(pulse_polarizations=[X,X,X], detection_polarization=X)
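# all three pulses and the detection are polarized along the laboratory X axis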
agg_eff.diagonalize()
print("\nEffetive model exciation energies:")
print("Energies in 1/cm:")
N1 = agg_eff.nmono
print([qr.convert(agg_eff.HH[i,i],"int","1/cm") for i in range(1, N1+1)])
print("")
mabsc = qr.MockAbsSpectrumCalculator(time, system=agg_eff)
rho0 = agg_eff.get_DensityMatrix(condition_type="thermal", temperature=0.0)
ham = agg_eff.get_Hamiltonian()
pthways = agg_eff.liouville_pathways_1(lab=lab, ham=ham, etol=1.0e-5,
verbose=0)
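# first-order Liouville pathways are sufficient for a linear absorption
# spectrum; etol presumably prunes pathways with negligible weight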
mabsc.bootstrap(rwa=qr.convert(10000.0,"1/cm","int"),
shape="Gaussian")
mabsc.set_pathways(pthways)
abs1 = mabsc.calculate(raw=False)
abs1.normalize2()
absc = qr.AbsSpectrumCalculator(time, system=agg)
# In[10]:
absc.bootstrap(rwa)
# In[11]:
abss = absc.calculate()
#absexp = qr.load("bas_77K.hdf5") #("DATA/bas_77K.hdf5")
absexp = qr.load_parcel(os.path.join(pre_in, "bas_77K.qrp"))
absexp.normalize()
absexp.subtract(0.086)
absexp.normalize()
abss.normalize2() #norm=0.53)
# In[29]:
#with qr.energy_units("nm"):
# abss.plot(axis=[650, 1000, 0, 0.7], show=False)
# absexp.plot()
plt.figure(0)
with qr.energy_units("1/cm"):
#abss.plot(axis=[10500, 15000, 0, 1.1], show=False)
abs1.plot(axis=[10500, 15000, 0, 1.1], show=False)
absexp.plot(show=True)
absexp.savefig(os.path.join(pre_out, "abs_full.png"))
# in a Notebook, the figure seems to always show itself when we leave the cell
# In[30]:
N1 = agg.Nbe[1]
print("Energies in 1/cm:")
print([qr.convert(agg.HH[i,i],"int","1/cm") for i in range(1, N1+1)])
# In[31]:
agg.diagonalize()
# In[32]:
# exciton report
agg.exciton_report(Nrep=8)
# In[33]:
agg.report_on_expansion(2)
# In[34]:
N1 = agg.Nbe[1]
print("Energies in 1/cm:")
print([qr.convert(agg.HH[i,i],"int","1/cm") for i in range(1, N1+1)])
# In[35]:
print("Transition dipoles square:")
print(agg.D2[1:N1+1,0])
#
# ## Fractional model
#
# Remove both H and BL
# In[36]:
#
# Get components of the fractional model
#
indices_of_components = []
names_of_components = ["PM", "PL", "BM"] # , "BL","PCT1", "PCT2"] # "HL", "HM", "BL", , "BCT"
components = []
for name in names_of_components:
indx = agg.get_Molecule_index(name)
mol = agg.get_Molecule_by_name(name)
#if name == "BM":
# mol.elenergies[1] = mol.elenergies[1] + 0.1
indices_of_components.append(indx)
components.append(mol)
print("Indices of selected molecules: ", indices_of_components)
# In[37]:
#
# Coupling matrix
#
Ni = len(indices_of_components)
Jfm = numpy.zeros((Ni, Ni), dtype=qr.REAL)
k_1 = 0
for i_1 in indices_of_components:
k_2 = 0
for i_2 in indices_of_components:
Jfm[k_1, k_2] = agg.resonance_coupling[i_1, i_2]
k_2 += 1
k_1 += 1
# In[38]:
#
# Fractional aggregate
#
frac = qr.Aggregate(components)
frac.set_resonance_coupling_matrix(Jfm)
# In[39]:
fix_dipole = False
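# optional tweak (disabled by default): enlarge the BM transition dipole
# by a factor of sqrt(2) while keeping its direction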
if fix_dipole:
BM_fix_dipole = frac.get_Molecule_by_name("BM")
dip = BM_fix_dipole.get_dipole(0, 1)
nrm = qr.norm(dip)
dip2 = qr.normalize2(dip, norm=numpy.sqrt(2.0)*nrm)
BM_fix_dipole.set_dipole(0, 1, dip2)
# In[40]:
#frac.save("fraction_40_4_CT_unbuilt.hdf5")
qr.save_parcel(frac, os.path.join(pre_out,"fraction_40_4_CT_unbuilt.qrp"))
# In[41]:
frac.build()
# In[42]:
absc2 = qr.AbsSpectrumCalculator(time, system=frac)
absc2.bootstrap(rwa)
abss2 = absc2.calculate()
#absexp2 = qr.load("bas_77K.hdf5")
absexp2 = qr.load_parcel(os.path.join(pre_in, "bas_77K.qrp"))
absexp2.normalize()
absexp2.subtract(0.086)
absexp2.normalize()
abss2.normalize2() #norm=0.53)
plt.figure(1)
with qr.energy_units("1/cm"):
abss2.plot(axis=[10500, 15000, 0, 1.1], show=False)
absexp2.plot(show=True)
absexp2.savefig(os.path.join(pre_out, "abs_frac.png"))
# In[43]:
frac.diagonalize()
# In[44]:
frac.report_on_expansion(3)
# In[45]:
HH = frac.get_Hamiltonian()
with qr.eigenbasis_of(HH):
with qr.energy_units("1/cm"):
print([HH.data[i,i] for i in range(1,frac.nmono)])
# In[46]:
#
# Get components of the fractional model
#
indices_of_components = []
names_of_components = ["PM", "PL", "BM", "BL","PCT1", "PCT2"] #["BM", "BL"] # "HL", "HM", "BL", , "BCT"
names_of_components3 = ["PM", "PL", "BL"]
components = []
for name in names_of_components3:
indx = agg_eff.get_Molecule_index(name)
mol = agg_eff.get_Molecule_by_name(name)
#if name == "BM":
# mol.elenergies[1] = mol.elenergies[1] + 0.1
indices_of_components.append(indx)
components.append(mol)
print("Indices of selected molecules: ", indices_of_components)
# In[47]:
#
# Fractional aggregate
#
frac_eff = qr.Aggregate(components)
frac_eff.set_resonance_coupling_matrix(Jfm)
# In[48]:
#frac_B.save("fraction_40_4_B_unbuilt.hdf5")
qr.save_parcel(frac_eff, os.path.join(pre_out,
"fraction_eff_40_4_CT_unbuilt.qrp"))
frac_eff.build()
frac_eff.diagonalize()
mabsc2 = qr.MockAbsSpectrumCalculator(time, system=frac_eff)
rho0 = frac_eff.get_DensityMatrix(condition_type="thermal", temperature=0.0)
ham = frac_eff.get_Hamiltonian()
pthways = frac_eff.liouville_pathways_1(lab=lab, ham=ham, etol=1.0e-5,
verbose=0)
mabsc2.bootstrap(rwa=qr.convert(10000.0,"1/cm","int"),
shape="Gaussian")
mabsc2.set_pathways(pthways)
abs2 = mabsc2.calculate(raw=False)
abs2.normalize2() | random_line_split |
||
model_abs.py |
#
# Model from Jordanides et al. (Ref. 1) is adjusted and extended by two CT states
#
#
jordanides = False
if jordanides:
offset = 0.0
offset_P = 0.0 #485.0
offset_P_M = offset_P + 0.0
h_shift = 0.0
sc_H = 1.0
sc_P = 1.0
else:
offset = 275
offset_P = 400 #485.0
offset_P_M = offset_P + 100.0
h_shift = 85.0
sc_H = 0.79
sc_P = 0.75
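# the offsets shift site energies (in 1/cm) relative to Ref. 1, while
# sc_H and sc_P rescale the H and P transition dipole moments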
#
# Molecules
#
with qr.energy_units("1/cm"):
PM = qr.Molecule([0.0, 11610.0+offset_P_M], name="PM")
PL = qr.Molecule([0.0, 11610.0+offset_P], name="PL")
BM = qr.Molecule([0.0, 12220.0+offset], name="BM")
BL = qr.Molecule([0.0, 12370.0+offset], name="BL")
HL = qr.Molecule([0.0, 13020.0+offset-h_shift], name="HL")
HM = qr.Molecule([0.0, 13150.0+offset+h_shift], name="HM")
# CT states are effectively represented as "new molecules" in the system
PCT_M = qr.Molecule([0.0, 15200], name="PCT1")
PCT_L = qr.Molecule([0.0, 13550], name="PCT2") # 13500
#
# Transition dipole moment from Ref. 1 are scaled
#
dPM = numpy.array([ 0.8546, 0.5051, 0.1206])*sc_P
dPL = numpy.array([-0.9649, -0.0250, 0.2613])*sc_P
dHM = numpy.array([ 0.2749, -0.3694, -0.8877])*sc_H
dHL = numpy.array([ 0.0452, -0.9672, -0.2498])*sc_H
PM.set_dipole(0,1, dPM)
PL.set_dipole(0,1, dPL)
BL.set_dipole(0,1, [ 0.7782, 0.5332, 0.3317])
BM.set_dipole(0,1, [-0.9681, 0.1107, 0.2249])
HL.set_dipole(0,1, dHL)
HM.set_dipole(0,1, dHM)
#
# CT states are dark
#
PCT_M.set_dipole(1, 0, [0.0, 0.0, 0.0])
PCT_L.set_dipole(1, 0, [0.0, 0.0, 0.0])
molecules = [PM, PL, BM, BL, HL, HM, PCT_M, PCT_L]
# saving molecules without environment
qr.save_parcel(molecules, os.path.join(pre_out,"molecules.qrp"))
#
# Here we build the RC as an aggregate of molecules
#
mol3 = [PM, PL, BM]
agg = qr.Aggregate(molecules=mol3)
#
# Exciton interaction matrix
#
# values from Ref. 1
JP_77K_Jordanides = 575.0
JP_77K = JP_77K_Jordanides
#
# Fitted values of the model with CT states
# starting values of the manual search of best parameters are
# taken from Ref. 2
#
if jordanides:
JP = 395 #JP_77K
XCT_M = 0.0
XCT_L = 0.0
YCT = 0.0
else:
JP = 690 #575
XCT_M = 905 #1400
XCT_L = 755
YCT = 550 #350
# Factor of three is just to experiment with
PB_1 = -104.0
PB_2 = -94.0
LCT = 0
MCT = 0
# the interaction matrix is taken from
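# (row/column ordering follows the molecules list:
#  PM, PL, BM, BL, HL, HM, PCT_M, PCT_L)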
J_Matrix = numpy.array([
[ 0.0, JP, -16.0, PB_1, 19.9, -4.8, XCT_M, YCT],
[ JP, 0.0, PB_2, 2.8, -6.8, 18.0, YCT, XCT_L],
[ -16.0, PB_2, 0.0, 19.3, -7.5, 95.8, MCT, LCT],
[ PB_1, 2.8, 19.3, 0.0, 123.1, -7.9, LCT, MCT],
[ 19.9, -6.8, -7.5, 123.1, 0.0, 3.9, 0.0, 0.0],
[ -4.8, 18.0, 95.8, -7.9, 3.9, 0.0, 0.0, 0.0],
[ XCT_M, YCT, MCT, LCT, 0.0, 0.0, 0.0, 0.0],
[ YCT, XCT_L, LCT, MCT, 0.0, 0.0, 0.0, 0.0]
])
with qr.energy_units("1/cm"):
agg.set_resonance_coupling_matrix(J_Matrix[0:3,0:3])
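# only the 3x3 PM/PL/BM block is used here, since agg was built from mol3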
#agg.save("RC_Model_40_4_adjusted_CT_no_environment_unbuilt.hdf5")
qr.save_parcel(agg, os.path.join(pre_out,
"RC_Model_40_4_adjusted_CT_no_environment_unbuilt.qrp"))
# In[3]:
# check that units were set correctly
rc = agg.resonance_coupling[1,0]
with qr.energy_units("1/cm"):
print(qr.convert(rc, "int"))
with qr.energy_units("1/cm"):
print(agg.get_resonance_coupling(1,0))
# In[4]:
# Bath correlation function
time = qr.TimeAxis(0.0, 1000, 1.0)
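# time axis for the bath correlation functions: 1000 steps of 1.0
# (presumably fs, quantarhei's default time unit)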
cfA_params = dict(ftype="OverdampedBrownian",
reorg=190, cortime=80, T=77, matsubara=100)
cfH_params = dict(ftype="OverdampedBrownian",
reorg=200, cortime=100, T=77, matsubara=100)
cfP_params = dict(ftype="OverdampedBrownian",
reorg=700, cortime=120, T=77, matsubara=100)
cfCT_params = dict(ftype="OverdampedBrownian",
reorg=3600, cortime=20, T=77, matsubara=200)
with qr.energy_units("1/cm"):
cfA = qr.CorrelationFunction(time, cfA_params)
cfH = qr.CorrelationFunction(time, cfH_params)
cfP = qr.CorrelationFunction(time, cfP_params)
cfCT = qr.CorrelationFunction(time, cfCT_params)
PM.set_transition_environment((0,1), cfP)
PL.set_transition_environment((0,1), cfP)
BM.set_transition_environment((0,1), cfA)
BL.set_transition_environment((0,1), cfA)
HL.set_transition_environment((0,1), cfH)
HM.set_transition_environment((0,1), cfH)
PCT_M.set_transition_environment((0,1), cfCT)
PCT_L.set_transition_environment((0,1), cfCT)
agg.build(mult=2)
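# mult=2 includes states with up to two excitations in the aggregate basis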
#agg.save("RC_Model_40_4_adjusted_CT_no_vibrations_built.hdf5")
qr.save_parcel(agg, os.path.join(pre_out,
"RC_Model_40_4_adjusted_CT_no_vibrations_built.qrp"))
# In[5]:
#
# Refitted model of the Reaction Center using effective Gaussian | try:
os.makedirs(pre_out, exist_ok=True)
except:
raise Exception("Output directory name '"
+pre_out+"' does not represent a valid directory") | conditional_block |
|
waypoint_updater.py | about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
TIMEOUT_VALUE = 0.1
ONE_MPH = 0.44704
class WaypointUpdater(object):
def __init__(self):
rospy.loginfo('WaypointUpdater::__init__ - Start')
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# commenting the two below for the time being until clarification about whether
# is needed or not
#rospy.Subscriber('/obstacle_waypoint', , self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.tf_listener = tf.TransformListener()
# The car's current position
self.pose = None
# The maps's complete waypoints
self.waypoints = None
# The car's current velocity
self.velocity = 0.0
# The timestamp of the last traffic_waypoint
self.traffic_waypoint_timestamp = 0.0
# The index of the waypoint in the base_waypoints list, which is closest to the traffic light
self.light_waypoint_index = None
# The approximate distance from the stop line to the traffic light
self.light_distance_thresh = 3.0
# The car's distance to the traffic light when the car started the slowing down process
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
# first waypoint index at the previous iteration
self.prev_first_wpt_index = 0
self.default_velocity = rospy.get_param('~velocity', 1) * ONE_MPH
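# the ~velocity parameter is given in mph and converted here to m/s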
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
first_wpt_index = -1
min_wpt_distance = float('inf')
if self.waypoints is None:
return
num_waypoints_in_list = len(self.waypoints.waypoints)
# Gererate an empty lane to store the final_waypoints
lane = Lane()
lane.header.frame_id = self.waypoints.header.frame_id
lane.header.stamp = rospy.Time(0)
lane.waypoints = []
# Iterate through the complete set of waypoints until we found the closest
distance_decreased = False
#rospy.loginfo('Started at waypoint index: %s', self.prev_first_wpt_index)
#start_time = time.time()
for index, waypoint in enumerate(self.waypoints.waypoints[self.prev_first_wpt_index:] + self.waypoints.waypoints[:self.prev_first_wpt_index], start=self.prev_first_wpt_index):
current_wpt_distance = self.distance(self.pose.pose.position, waypoint.pose.pose.position)
if distance_decreased and current_wpt_distance > min_wpt_distance:
break
if current_wpt_distance > 0 and current_wpt_distance < min_wpt_distance:
min_wpt_distance = current_wpt_distance
first_wpt_index = index
distance_decreased = True
first_wpt_index %= num_waypoints_in_list
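# wrap around so the index stays valid when the track loops back to the start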
transformed_light_point = None
if first_wpt_index == -1:
rospy.logwarn('WaypointUpdater::pose_cb - No waypoints ahead of ego were found... it seems that the car went off course')
else:
# transform quickly, avoiding long wait cycles
# Transform first waypoint to car coordinates
self.waypoints.waypoints[first_wpt_index].pose.header.frame_id = self.waypoints.header.frame_id
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(0.02))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(TIMEOUT_VALUE))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
rospy.logwarn("Failed to find camera to map transform")
return
# All waypoints in front of the car should have positive X coordinate in car coordinate frame
# If the closest waypoint is behind the car, skip this waypoint
if transformed_waypoint.pose.position.x <= 0.0:
first_wpt_index += 1
self.prev_first_wpt_index = first_wpt_index % num_waypoints_in_list
# Prepare for calculating velocity:
slow_down = False
reached_zero_velocity = False
car_distance_to_stop_line = -1.
planned_velocity = self.default_velocity
# If the last traffic_waypoint message is newer than the threshold, we might need to stop the car.
if self.light_waypoint_index >= 0:
rospy.logdebug('should stop the car %s', self.light_waypoint_index)
self.waypoints.waypoints[self.light_waypoint_index].pose.header.frame_id = self.waypoints.header.frame_id
transformed_light_point = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[self.light_waypoint_index].pose)
# The approximate distance from the stop line to the traffic light
car_distance_to_stop_line = transformed_light_point.pose.position.x - self.light_distance_thresh
# Estimate whether the car cannot cross the stop line on yellow (in less than 2 seconds). Otherwise don't slow down.
if car_distance_to_stop_line >= 4 and self.velocity / car_distance_to_stop_line < 2:  # distance check first so the division cannot hit zero
slow_down = True
if self.car_distance_to_sl_when_car_started_to_slow_down is None:
self.car_distance_to_sl_when_car_started_to_slow_down = car_distance_to_stop_line
self.car_velocity_when_car_started_to_slow_down = self.velocity
rospy.logdebug('Stopping the car')
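# Linear ramp toward the stop line: plan roughly 0.2 m/s per metre of
# remaining distance, clamped to [0, default_velocity] (e.g. 10 m out -> 2 m/s).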
planned_velocity = min(max(abs(car_distance_to_stop_line*0.2),0.0),self.default_velocity)
# Stop the car in a safe distance before the stop line to give the simulator space to adapt velocity
#we are close to the stop line and slow
elif car_distance_to_stop_line > 0 and car_distance_to_stop_line < 4 and self.velocity < 6:
slow_down = True
if car_distance_to_stop_line > 0.5:
planned_velocity = 1.0
else:
|
else:
rospy.logwarn('too late to stop the car')
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
rospy.loginfo('car_distance_to_stop_line %s velocity %s set to %s',car_distance_to_stop_line,self.velocity,planned_velocity)
# Fill the lane with the final waypoints
for num_wp in range(LOOKAHEAD_WPS):
wp = Waypoint()
wp.pose = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].pose
wp.twist = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].twist
wp.twist.twist.linear.x = planned_velocity
wp.twist.twist.linear.y = 0.0
wp.twist.twist.linear.z = 0.0
wp.twist.twist.angular.x = 0.0
wp.twist.twist.angular.y = 0.0
wp.twist.twist.angular.z = 0.0
lane.waypoints.append(wp)
# finally, publish waypoints as modified on /final_waypoints topic
self.final_waypoints_pub.publish(lane)
def velocity_cb(self, msg):
self.velocity = msg.twist.linear.x
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
def traffic_cb(self, traffic_waypoint):
# Callback for /traffic_waypoint message.
# Store the timestamp and the traffic light position to use them for final_waypoints in waypoints_cb
self.traffic_waypoint_timestamp = time.time()
self.light_waypoint_index = traffic_waypoint.data
# rospy.loginfo("received traffic light %s",self.light_waypoint_index)
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self | planned_velocity = 0.0
reached_zero_velocity = True | conditional_block |
waypoint_updater.py | about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
TIMEOUT_VALUE = 0.1
ONE_MPH = 0.44704
class WaypointUpdater(object):
def __init__(self):
rospy.loginfo('WaypointUpdater::__init__ - Start')
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# the /obstacle_waypoint subscriber below stays commented out until it is
# clarified whether it is needed
#rospy.Subscriber('/obstacle_waypoint', , self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.tf_listener = tf.TransformListener()
# The car's current position
self.pose = None
# The map's complete waypoints
self.waypoints = None
# The car's current velocity
self.velocity = 0.0
# The timestamp of the last traffic_waypoint
self.traffic_waypoint_timestamp = 0.0
# The index of the waypoint in the base_waypoints list, which is closest to the traffic light
self.light_waypoint_index = None
# The approximate distance from the stop line to the traffic light
self.light_distance_thresh = 3.0
# The car's distance to the traffic light when the car started the slowing down process
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
# first waypoint index at the previous iteration
self.prev_first_wpt_index = 0
self.default_velocity = rospy.get_param('~velocity', 1) * ONE_MPH
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
first_wpt_index = -1
min_wpt_distance = float('inf')
if self.waypoints is None:
return
num_waypoints_in_list = len(self.waypoints.waypoints)
# Generate an empty lane to store the final_waypoints
lane = Lane()
lane.header.frame_id = self.waypoints.header.frame_id
lane.header.stamp = rospy.Time(0)
lane.waypoints = []
# Iterate through the complete set of waypoints until we find the closest one
distance_decreased = False
#rospy.loginfo('Started at waypoint index: %s', self.prev_first_wpt_index)
#start_time = time.time()
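# The list is rotated so the scan starts at the index found last cycle; once
# the distance starts increasing again the closest waypoint has been passed,
# so the scan can stop early.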
for index, waypoint in enumerate(self.waypoints.waypoints[self.prev_first_wpt_index:] + self.waypoints.waypoints[:self.prev_first_wpt_index], start=self.prev_first_wpt_index):
current_wpt_distance = self.distance(self.pose.pose.position, waypoint.pose.pose.position)
if distance_decreased and current_wpt_distance > min_wpt_distance:
break
if current_wpt_distance > 0 and current_wpt_distance < min_wpt_distance:
min_wpt_distance = current_wpt_distance
first_wpt_index = index
distance_decreased = True
first_wpt_index %= num_waypoints_in_list
transformed_light_point = None
if first_wpt_index == -1:
rospy.logwarn('WaypointUpdater::pose_cb - No waypoints ahead of ego were found... seems that the car went off course')
else:
# transform quickly: try a short wait first, with a longer fallback below
# Transform first waypoint to car coordinates
self.waypoints.waypoints[first_wpt_index].pose.header.frame_id = self.waypoints.header.frame_id
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(0.02))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(TIMEOUT_VALUE))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
rospy.logwarn("Failed to find the base_link to world transform")
return
# All waypoints in front of the car should have positive X coordinate in car coordinate frame
# If the closest waypoint is behind the car, skip this waypoint
if transformed_waypoint.pose.position.x <= 0.0:
first_wpt_index += 1
self.prev_first_wpt_index = first_wpt_index % num_waypoints_in_list
# Prepare for calculating velocity:
slow_down = False
reached_zero_velocity = False
car_distance_to_stop_line = -1.
planned_velocity = self.default_velocity
# If the last traffic_waypoint message is newer than the threshold, we might need to stop the car.
if self.light_waypoint_index >= 0:
rospy.logdebug('should stop the car %s', self.light_waypoint_index)
self.waypoints.waypoints[self.light_waypoint_index].pose.header.frame_id = self.waypoints.header.frame_id
transformed_light_point = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[self.light_waypoint_index].pose)
# The approximate distance from the stop line to the traffic light
car_distance_to_stop_line = transformed_light_point.pose.position.x - self.light_distance_thresh
# Estimate whether the car cannot cross the stop line on yellow (in less than 2 seconds). Otherwise don't slow down.
if car_distance_to_stop_line >= 4 and self.velocity / car_distance_to_stop_line < 2:  # distance check first so the division cannot hit zero
slow_down = True
if self.car_distance_to_sl_when_car_started_to_slow_down is None:
self.car_distance_to_sl_when_car_started_to_slow_down = car_distance_to_stop_line
self.car_velocity_when_car_started_to_slow_down = self.velocity
rospy.logdebug('Stopping the car')
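# Linear ramp toward the stop line: plan roughly 0.2 m/s per metre of
# remaining distance, clamped to [0, default_velocity] (e.g. 10 m out -> 2 m/s).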
planned_velocity = min(max(abs(car_distance_to_stop_line*0.2),0.0),self.default_velocity)
# Stop the car in a safe distance before the stop line to give the simulator space to adapt velocity
#we are close to the stop line and slow
elif car_distance_to_stop_line > 0 and car_distance_to_stop_line < 4 and self.velocity < 6:
slow_down = True
if car_distance_to_stop_line > 0.5:
planned_velocity = 1.0
else:
planned_velocity = 0.0
reached_zero_velocity = True
else:
rospy.logwarn('too late to stop the car')
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
rospy.loginfo('car_distance_to_stop_line %s velocity %s set to %s',car_distance_to_stop_line,self.velocity,planned_velocity)
# Fill the lane with the final waypoints
for num_wp in range(LOOKAHEAD_WPS):
wp = Waypoint()
wp.pose = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].pose
wp.twist = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].twist
wp.twist.twist.linear.x = planned_velocity
wp.twist.twist.linear.y = 0.0
wp.twist.twist.linear.z = 0.0
wp.twist.twist.angular.x = 0.0
wp.twist.twist.angular.y = 0.0
wp.twist.twist.angular.z = 0.0
lane.waypoints.append(wp)
# finally, publish waypoints as modified on /final_waypoints topic
self.final_waypoints_pub.publish(lane)
def velocity_cb(self, msg):
|
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
def traffic_cb(self, traffic_waypoint):
# Callback for /traffic_waypoint message.
# Store the timestamp and the traffic light position to use them for final_waypoints in waypoints_cb
self.traffic_waypoint_timestamp = time.time()
self.light_waypoint_index = traffic_waypoint.data
# rospy.loginfo("received traffic light %s",self.light_waypoint_index)
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, | self.velocity = msg.twist.linear.x | identifier_body |
waypoint_updater.py | about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
TIMEOUT_VALUE = 0.1
ONE_MPH = 0.44704
class WaypointUpdater(object):
def __init__(self):
rospy.loginfo('WaypointUpdater::__init__ - Start')
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# the /obstacle_waypoint subscriber below stays commented out until it is
# clarified whether it is needed
#rospy.Subscriber('/obstacle_waypoint', , self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.tf_listener = tf.TransformListener()
# The car's current position
self.pose = None
# The map's complete waypoints
self.waypoints = None
# The car's current velocity
self.velocity = 0.0
# The timestamp of the last traffic_waypoint
self.traffic_waypoint_timestamp = 0.0
# The index of the waypoint in the base_waypoints list, which is closest to the traffic light
self.light_waypoint_index = None
# The approximate distance from the stop line to the traffic light
self.light_distance_thresh = 3.0
# The car's distance to the traffic light when the car started the slowing down process
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
# first waypoint index at the previous iteration
self.prev_first_wpt_index = 0
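# Target cruise speed: '~velocity' is assumed to be given in MPH and is
# converted to m/s via ONE_MPH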
self.default_velocity = rospy.get_param('~velocity', 1) * ONE_MPH
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
first_wpt_index = -1
min_wpt_distance = float('inf')
if self.waypoints is None:
return
num_waypoints_in_list = len(self.waypoints.waypoints)
# Generate an empty lane to store the final_waypoints
lane = Lane()
lane.header.frame_id = self.waypoints.header.frame_id
lane.header.stamp = rospy.Time(0)
lane.waypoints = []
# Iterate through the complete set of waypoints until we find the closest one
distance_decreased = False
#rospy.loginfo('Started at waypoint index: %s', self.prev_first_wpt_index)
#start_time = time.time()
for index, waypoint in enumerate(self.waypoints.waypoints[self.prev_first_wpt_index:] + self.waypoints.waypoints[:self.prev_first_wpt_index], start=self.prev_first_wpt_index):
current_wpt_distance = self.distance(self.pose.pose.position, waypoint.pose.pose.position)
if distance_decreased and current_wpt_distance > min_wpt_distance:
break
if current_wpt_distance > 0 and current_wpt_distance < min_wpt_distance:
min_wpt_distance = current_wpt_distance
first_wpt_index = index
distance_decreased = True
first_wpt_index %= num_waypoints_in_list
transformed_light_point = None
if first_wpt_index == -1:
rospy.logwarn('WaypointUpdater::pose_cb - No waypoints ahead of ego were found... seems that the car went off course')
else:
# transform quickly: try a short wait first, with a longer fallback below
# Transform first waypoint to car coordinates
self.waypoints.waypoints[first_wpt_index].pose.header.frame_id = self.waypoints.header.frame_id
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(0.02))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(TIMEOUT_VALUE))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
rospy.logwarn("Failed to find the base_link to world transform")
return
# All waypoints in front of the car should have positive X coordinate in car coordinate frame
# If the closest waypoint is behind the car, skip this waypoint
if transformed_waypoint.pose.position.x <= 0.0:
first_wpt_index += 1
self.prev_first_wpt_index = first_wpt_index % num_waypoints_in_list
# Prepare for calculating velocity:
slow_down = False
reached_zero_velocity = False
car_distance_to_stop_line = -1.
planned_velocity = self.default_velocity
# If the last traffic_waypoint message is newer than the threshold, we might need to stop the car.
if self.light_waypoint_index >= 0:
rospy.logdebug('should stop the car %s', self.light_waypoint_index)
self.waypoints.waypoints[self.light_waypoint_index].pose.header.frame_id = self.waypoints.header.frame_id
transformed_light_point = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[self.light_waypoint_index].pose)
# The approximate distance from the stop line to the traffic light
car_distance_to_stop_line = transformed_light_point.pose.position.x - self.light_distance_thresh
# Estimate whether the car cannot cross the stop line on yellow (in less than 2 seconds). Otherwise don't slow down.
if car_distance_to_stop_line >= 4 and self.velocity / car_distance_to_stop_line < 2:  # distance check first so the division cannot hit zero
slow_down = True
if self.car_distance_to_sl_when_car_started_to_slow_down is None:
self.car_distance_to_sl_when_car_started_to_slow_down = car_distance_to_stop_line
self.car_velocity_when_car_started_to_slow_down = self.velocity
rospy.logdebug('Stopping the car')
planned_velocity = min(max(abs(car_distance_to_stop_line*0.2),0.0),self.default_velocity)
# Stop the car in a safe distance before the stop line to give the simulator space to adapt velocity
#we are close to the stop line and slow
elif car_distance_to_stop_line > 0 and car_distance_to_stop_line < 4 and self.velocity < 6:
slow_down = True
if car_distance_to_stop_line > 0.5:
planned_velocity = 1.0
else:
planned_velocity = 0.0
reached_zero_velocity = True
else:
rospy.logwarn('too late to stop the car')
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
rospy.loginfo('car_distance_to_stop_line %s velocity %s set to %s',car_distance_to_stop_line,self.velocity,planned_velocity)
# Fill the lane with the final waypoints
for num_wp in range(LOOKAHEAD_WPS):
wp = Waypoint()
wp.pose = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].pose
wp.twist = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].twist
wp.twist.twist.linear.x = planned_velocity
wp.twist.twist.linear.y = 0.0
wp.twist.twist.linear.z = 0.0
wp.twist.twist.angular.x = 0.0
wp.twist.twist.angular.y = 0.0
wp.twist.twist.angular.z = 0.0
lane.waypoints.append(wp)
# finally, publish waypoints as modified on /final_waypoints topic
self.final_waypoints_pub.publish(lane)
def velocity_cb(self, msg):
self.velocity = msg.twist.linear.x
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
def traffic_cb(self, traffic_waypoint):
# Callback for /traffic_waypoint message.
# Store the timestamp and the traffic light position to use them for final_waypoints in waypoints_cb
self.traffic_waypoint_timestamp = time.time()
self.light_waypoint_index = traffic_waypoint.data
# rospy.loginfo("received traffic light %s",self.light_waypoint_index) | pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, |
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later | random_line_split |
waypoint_updater.py | lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
TIMEOUT_VALUE = 0.1
ONE_MPH = 0.44704
class WaypointUpdater(object):
def __init__(self):
rospy.loginfo('WaypointUpdater::__init__ - Start')
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# the /obstacle_waypoint subscriber below stays commented out until it is
# clarified whether it is needed
#rospy.Subscriber('/obstacle_waypoint', , self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.tf_listener = tf.TransformListener()
# The car's current position
self.pose = None
# The map's complete waypoints
self.waypoints = None
# The car's current velocity
self.velocity = 0.0
# The timestamp of the last traffic_waypoint
self.traffic_waypoint_timestamp = 0.0
# The index of the waypoint in the base_waypoints list, which is closest to the traffic light
self.light_waypoint_index = None
# The approximate distance from the stop line to the traffic light
self.light_distance_thresh = 3.0
# The car's distance to the traffic light when the car started the slowing down process
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
# first waypoint index at the previous iteration
self.prev_first_wpt_index = 0
self.default_velocity = rospy.get_param('~velocity', 1) * ONE_MPH
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
first_wpt_index = -1
min_wpt_distance = float('inf')
if self.waypoints is None:
return
num_waypoints_in_list = len(self.waypoints.waypoints)
# Generate an empty lane to store the final_waypoints
lane = Lane()
lane.header.frame_id = self.waypoints.header.frame_id
lane.header.stamp = rospy.Time(0)
lane.waypoints = []
# Iterate through the complete set of waypoints until we find the closest one
distance_decreased = False
#rospy.loginfo('Started at waypoint index: %s', self.prev_first_wpt_index)
#start_time = time.time()
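# The list is rotated so the scan starts at the index found last cycle; once
# the distance starts increasing again the closest waypoint has been passed,
# so the scan can stop early.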
for index, waypoint in enumerate(self.waypoints.waypoints[self.prev_first_wpt_index:] + self.waypoints.waypoints[:self.prev_first_wpt_index], start=self.prev_first_wpt_index):
current_wpt_distance = self.distance(self.pose.pose.position, waypoint.pose.pose.position)
if distance_decreased and current_wpt_distance > min_wpt_distance:
break
if current_wpt_distance > 0 and current_wpt_distance < min_wpt_distance:
min_wpt_distance = current_wpt_distance
first_wpt_index = index
distance_decreased = True
first_wpt_index %= num_waypoints_in_list
transformed_light_point = None
if first_wpt_index == -1:
rospy.logwarn('WaypointUpdater::pose_cb - No waypoints ahead of ego were found... seems that the car went off course')
else:
# transform quickly: try a short wait first, with a longer fallback below
# Transform first waypoint to car coordinates
self.waypoints.waypoints[first_wpt_index].pose.header.frame_id = self.waypoints.header.frame_id
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(0.02))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
try:
self.tf_listener.waitForTransform("base_link", "world", rospy.Time(0), rospy.Duration(TIMEOUT_VALUE))
transformed_waypoint = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[first_wpt_index].pose)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
rospy.logwarn("Failed to find the base_link to world transform")
return
# All waypoints in front of the car should have positive X coordinate in car coordinate frame
# If the closest waypoint is behind the car, skip this waypoint
if transformed_waypoint.pose.position.x <= 0.0:
first_wpt_index += 1
self.prev_first_wpt_index = first_wpt_index % num_waypoints_in_list
# Prepare for calculating velocity:
slow_down = False
reached_zero_velocity = False
car_distance_to_stop_line = -1.
planned_velocity = self.default_velocity
# If the last traffic_waypoint message is newer than the threshold, we might need to stop the car.
if self.light_waypoint_index >= 0:
rospy.logdebug('should stop the car %s', self.light_waypoint_index)
self.waypoints.waypoints[self.light_waypoint_index].pose.header.frame_id = self.waypoints.header.frame_id
transformed_light_point = self.tf_listener.transformPose("base_link", self.waypoints.waypoints[self.light_waypoint_index].pose)
# The approximate distance from the stop line to the traffic light
car_distance_to_stop_line = transformed_light_point.pose.position.x - self.light_distance_thresh
# Estimate whether the car cannot cross the stop line on yellow (in less than 2 seconds). Otherwise don't slow down.
if car_distance_to_stop_line >= 4 and self.velocity / car_distance_to_stop_line < 2:  # distance check first so the division cannot hit zero
slow_down = True
if self.car_distance_to_sl_when_car_started_to_slow_down is None:
self.car_distance_to_sl_when_car_started_to_slow_down = car_distance_to_stop_line
self.car_velocity_when_car_started_to_slow_down = self.velocity
rospy.logdebug('Stopping the car')
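# Linear ramp toward the stop line: plan roughly 0.2 m/s per metre of
# remaining distance, clamped to [0, default_velocity] (e.g. 10 m out -> 2 m/s).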
planned_velocity = min(max(abs(car_distance_to_stop_line*0.2),0.0),self.default_velocity)
# Stop the car in a safe distance before the stop line to give the simulator space to adapt velocity
#we are close to the stop line and slow
elif car_distance_to_stop_line > 0 and car_distance_to_stop_line < 4 and self.velocity < 6:
slow_down = True
if car_distance_to_stop_line > 0.5:
planned_velocity = 1.0
else:
planned_velocity = 0.0
reached_zero_velocity = True
else:
rospy.logwarn('too late to stop the car')
self.car_distance_to_sl_when_car_started_to_slow_down = None
self.car_velocity_when_car_started_to_slow_down = None
rospy.loginfo('car_distance_to_stop_line %s velocity %s set to %s',car_distance_to_stop_line,self.velocity,planned_velocity)
# Fill the lane with the final waypoints
for num_wp in range(LOOKAHEAD_WPS):
wp = Waypoint()
wp.pose = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].pose
wp.twist = self.waypoints.waypoints[(first_wpt_index + num_wp) % num_waypoints_in_list].twist
wp.twist.twist.linear.x = planned_velocity
wp.twist.twist.linear.y = 0.0
wp.twist.twist.linear.z = 0.0
wp.twist.twist.angular.x = 0.0
wp.twist.twist.angular.y = 0.0
wp.twist.twist.angular.z = 0.0
lane.waypoints.append(wp)
# finally, publish waypoints as modified on /final_waypoints topic
self.final_waypoints_pub.publish(lane)
def velocity_cb(self, msg):
self.velocity = msg.twist.linear.x
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
def traffic_cb(self, traffic_waypoint):
# Callback for /traffic_waypoint message.
# Store the timestamp and the traffic light position to use them for final_waypoints in waypoints_cb
self.traffic_waypoint_timestamp = time.time()
self.light_waypoint_index = traffic_waypoint.data
# rospy.loginfo("received traffic light %s",self.light_waypoint_index)
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def | distance | identifier_name |
|
lib.rs | att"' "and \"friends\"""#).unwrap();
//! let chn = br().sib_lf("hello").sib_lf("matt\"").sib_lf("and \"friends\"");
//! assert_eq!(bk,chn);
//!
//! ```
use std::str::FromStr;
use std::fmt;
use std::fmt::Display;
use std::iter::IntoIterator;
pub mod tail;
pub use tail::{Tail};
use tail::EMPTY_BRACKET;
pub mod iter;
pub use iter::*;
#[derive(PartialEq,Debug)]
pub enum Bracket{
Branch(Vec<Bracket>),
Leaf(String),
Empty,
}
pub fn lf(s:&str)->Bracket{
Bracket::Leaf(s.to_string())
}
pub fn br()->Bracket{
Bracket::Branch(Vec::new())
}
impl FromStr for Bracket{
type Err = String;
fn from_str(s:&str)->Result<Bracket,String>{
let mut res = Bracket::Empty;
let mut it = s.chars();
let mut curr = String::new();
while let Some(c) = it.next() {
Bracket::match_char(c,&mut it,&mut curr,&mut res)?;
}
if curr.len() >0 {
res.add_sib_str(curr);
}
Ok(res)
}
}
impl<'a>IntoIterator for &'a Bracket{
type Item = &'a Bracket;
type IntoIter = BracketIter<'a>;
fn into_iter(self)->Self::IntoIter{
BracketIter::new(self)
}
}
impl Bracket{
fn add_sib_str(&mut self,s:String){
if s.len() == 0 {
return
}
self.add_sibling(Bracket::Leaf(s));
}
/// Chaining method for quickly creating a tree. Adds a sibling to a bracket;
/// if it is a leaf, makes it a parent first.
pub fn sib(mut self,s:Self)->Self{
self.add_sibling(s);
self
}
/// Chaining method for easily adding a leaf as a sibling from an &str
pub fn sib_lf(self,s:&str)->Self{
self.sib(lf(s))
}
fn add_sibling(&mut self,s:Bracket){
if s == Bracket::Empty {
return
}
let c:String = match self {
Bracket::Branch(ref mut v)=>{
v.push(s);
return
}
Bracket::Empty=>{
*self = s;
return
}
Bracket::Leaf(content)=>content.to_string(),
};
*self = Bracket::Branch(vec![Bracket::Leaf(c),s]);
}
fn match_char<I>(c:char,it:&mut I,curr:&mut String,res:&mut Bracket)->Result<(),String>
where I:Iterator<Item=char>{
match c {
'('=>{ // when non-lexical lifetimes land, these curr.clone()s can hopefully be removed
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,')')?);
},
'{'=>{ // TODO: make JSON-esque; probably needs an Object variant
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,'}')?);
},
'['=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,']')?);
},
'"'|'\''=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_quotes(it,c)?);
}
' '|','=>{
res.add_sib_str(curr.clone());
*curr = String::new();
},
other=>curr.push(other),
}
Ok(())
}
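// Parses everything up to the matching close delimiter into a Branch,
// recursing through match_char for any nested brackets or quoted strings.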
fn from_bracket<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut res = Bracket::Branch(Vec::new());
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
res.add_sib_str(curr.clone());
return Ok(res);
}
Bracket::match_char(c,it,&mut curr,&mut res)?;
}
Err(format!("Close Delim '{}' not found",delim))
}
fn from_quotes<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
return Ok(Bracket::Leaf(curr));
}
match c {
'\\'=>{
match it.next(){
Some(c2)=>{
curr.push(c2);
continue
},
None=>return Err("Escape before end of string".to_string()),
}
},
_=> curr.push(c),
}
}
Err(format!("Close Delim '{}' not found",delim))
}
pub fn head<'a>(&'a self)->&'a Bracket{
match self{
Bracket::Branch(v)=>match v.len(){
0 => &EMPTY_BRACKET,
_ => &v[0],
}
_ => &EMPTY_BRACKET,
}
}
pub fn tail<'a>(&'a self)->Tail<'a>{
match self{
Bracket::Branch(v)=>match v.len(){
0|1 =>Tail::Empty,
_=>Tail::Rest(&v[1..]),
}
_=>Tail::Empty,
}
}
pub fn tail_n<'a>(&'a self,n:usize)->Tail<'a>{
match self{
Bracket::Branch(v)=>{
if v.len() <= n {
return Tail::Empty;
}
Tail::Rest(&v[n..])
}
_=>Tail::Empty,
}
}
pub fn tail_h<'a>(&'a self, n:usize)->&'a Bracket{
match self{
Bracket::Branch(v)=>{
if v.len() <= n{
return &EMPTY_BRACKET;
}
&v[n]
}
_=>&EMPTY_BRACKET,
}
}
pub fn head_tail<'a>(&'a self)->(&'a Bracket,Tail<'a>){
(self.head(),self.tail())
}
pub fn match_str<'a>(&'a self)->&'a str{
match self {
Bracket::Leaf(ref s)=>s.as_ref(),
_=>"",
}
}
}
impl Display for Bracket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Bracket::Branch(ref v)=>{
let mut gap = "";
for b in v {
let res = match b {
Bracket::Branch(_)=>write!(f,"{}[{}]",gap,b),
_=>write!(f,"{}{}",gap,b),
};
if res.is_err(){
return res;
}
gap = " ";
}
Ok(())
},
Bracket::Leaf(s)=>{
//TODO handle Escapes
write!(f,"\"{}\"",s)
},
_=>{ write!(f,"--EMPTY--") },
}
}
}
#[cfg(test)]
mod tests {
use super::{Bracket,br,lf};
use std::str::FromStr;
#[test]
fn spaces() {
let b1 = Bracket::from_str("matt dave (andy steve)").unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave").sib(
br().sib_lf("andy").sib_lf("steve")
);
let b2 = Bracket::from_str("matt dave( andy steve)").unwrap();
let b3 = Bracket::from_str(" matt dave ( andy steve ) ").unwrap();
assert_eq!(b1,c1);
assert_eq!(b1,b2);
assert_eq!(b1,b3);
}
#[test]
fn empty_parent(){
let b1 = Bracket::from_str("matt () dave").unwrap();
let c1 = br().sib_lf("matt").sib(br()).sib_lf("dave");
assert_eq!(b1,c1);
}
#[test]
fn many_parent(){
let b1 = Bracket::from_str("matt ({[() ()]})").unwrap();
let c1 = lf("matt")
.sib(
br().sib(
br().sib(
br().sib(br()).sib(br())
)
)
);
assert_eq!(b1,c1);
}
#[test]
fn strings(){
let b1 = Bracket::from_str(r#"matt"dave""#).unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave");
assert_eq!(b1,c1);
let b2 = Bracket::from_str(r#""andy \"hates\" cheese""#).unwrap();
let c2 = lf(r#"andy "hates" cheese"#);
assert_eq!(b2,c2);
}
#[test]
fn errors(){
assert!(Bracket::from_str("peop ( er").is_err());
assert!(Bracket::from_str(r#""poop"#).is_err());
} | random_line_split |
||
lib.rs | Build method
//! let basic1 = Branch(vec![Leaf("hello".to_string()),
//! Branch(vec![Leaf("peter".to_string()),
//! Leaf("dave".to_string())])]);
//!
//! //Chaining Build method
//! let chain1 = br().sib_lf("hello")
//! .sib(br().sib_lf("peter").sib_lf("dave"));
//!
//! assert_eq!(str1,basic1);
//! assert_eq!(str1,chain1);
//! ```
//!
//! It can also handle string input with escapes. Quotes are removed and the string item is
//! considered a single Leaf value;
//!
//! ```
//! use bracket_parse::{Bracket,br,lf};
//! use std::str::FromStr;
//!
//! let bk = Bracket::from_str(r#""hello" 'matt"' "and \"friends\"""#).unwrap();
//! let chn = br().sib_lf("hello").sib_lf("matt\"").sib_lf("and \"friends\"");
//! assert_eq!(bk,chn);
//!
//! ```
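//!
//! Parsed data can be read back out Lisp-style with `head`, `tail_h` and
//! `match_str` (a minimal sketch based on the methods defined below):
//!
//! ```
//! use bracket_parse::Bracket;
//! use std::str::FromStr;
//!
//! let b = Bracket::from_str("add 1 2").unwrap();
//! assert_eq!(b.head().match_str(),"add");
//! assert_eq!(b.tail_h(1).match_str(),"1");
//! ```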
use std::str::FromStr;
use std::fmt;
use std::fmt::Display;
use std::iter::IntoIterator;
pub mod tail;
pub use tail::{Tail};
use tail::EMPTY_BRACKET;
pub mod iter;
pub use iter::*;
#[derive(PartialEq,Debug)]
pub enum Bracket{
Branch(Vec<Bracket>),
Leaf(String),
Empty,
}
pub fn lf(s:&str)->Bracket |
pub fn br()->Bracket{
Bracket::Branch(Vec::new())
}
impl FromStr for Bracket{
type Err = String;
fn from_str(s:&str)->Result<Bracket,String>{
let mut res = Bracket::Empty;
let mut it = s.chars();
let mut curr = String::new();
while let Some(c) = it.next() {
Bracket::match_char(c,&mut it,&mut curr,&mut res)?;
}
if curr.len() >0 {
res.add_sib_str(curr);
}
Ok(res)
}
}
impl<'a>IntoIterator for &'a Bracket{
type Item = &'a Bracket;
type IntoIter = BracketIter<'a>;
fn into_iter(self)->Self::IntoIter{
BracketIter::new(self)
}
}
impl Bracket{
fn add_sib_str(&mut self,s:String){
if s.len() == 0 {
return
}
self.add_sibling(Bracket::Leaf(s));
}
/// Chaining method for quickly creating a tree. Adds a sibling to a bracket;
/// if it is a leaf, makes it a parent first.
pub fn sib(mut self,s:Self)->Self{
self.add_sibling(s);
self
}
/// Chaining method for easily adding a leaf as a sibling from an &str
pub fn sib_lf(self,s:&str)->Self{
self.sib(lf(s))
}
fn add_sibling(&mut self,s:Bracket){
if s == Bracket::Empty {
return
}
let c:String = match self {
Bracket::Branch(ref mut v)=>{
v.push(s);
return
}
Bracket::Empty=>{
*self = s;
return
}
Bracket::Leaf(content)=>content.to_string(),
};
*self = Bracket::Branch(vec![Bracket::Leaf(c),s]);
}
fn match_char<I>(c:char,it:&mut I,curr:&mut String,res:&mut Bracket)->Result<(),String>
where I:Iterator<Item=char>{
match c {
'('=>{ // when non-lexical lifetimes land, these curr.clone()s can hopefully be removed
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,')')?);
},
'{'=>{ // TODO: make JSON-esque; probably needs an Object variant
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,'}')?);
},
'['=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,']')?);
},
'"'|'\''=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_quotes(it,c)?);
}
' '|','=>{
res.add_sib_str(curr.clone());
*curr = String::new();
},
other=>curr.push(other),
}
Ok(())
}
fn from_bracket<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut res = Bracket::Branch(Vec::new());
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
res.add_sib_str(curr.clone());
return Ok(res);
}
Bracket::match_char(c,it,&mut curr,&mut res)?;
}
Err(format!("Close Delim '{}' not found",delim))
}
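// Reads characters verbatim until the closing quote; a backslash escapes the
// next character, so \" and \' survive inside quoted leaves.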
fn from_quotes<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
return Ok(Bracket::Leaf(curr));
}
match c {
'\\'=>{
match it.next(){
Some(c2)=>{
curr.push(c2);
continue
},
None=>return Err("Escape before end of string".to_string()),
}
},
_=> curr.push(c),
}
}
Err(format!("Close Delim '{}' not found",delim))
}
pub fn head<'a>(&'a self)->&'a Bracket{
match self{
Bracket::Branch(v)=>match v.len(){
0 => &EMPTY_BRACKET,
_ => &v[0],
}
_ => &EMPTY_BRACKET,
}
}
pub fn tail<'a>(&'a self)->Tail<'a>{
match self{
Bracket::Branch(v)=>match v.len(){
0|1 =>Tail::Empty,
_=>Tail::Rest(&v[1..]),
}
_=>Tail::Empty,
}
}
pub fn tail_n<'a>(&'a self,n:usize)->Tail<'a>{
match self{
Bracket::Branch(v)=>{
if v.len() <= n {
return Tail::Empty;
}
Tail::Rest(&v[n..])
}
_=>Tail::Empty,
}
}
pub fn tail_h<'a>(&'a self, n:usize)->&'a Bracket{
match self{
Bracket::Branch(v)=>{
if v.len() <= n{
return &EMPTY_BRACKET;
}
&v[n]
}
_=>&EMPTY_BRACKET,
}
}
pub fn head_tail<'a>(&'a self)->(&'a Bracket,Tail<'a>){
(self.head(),self.tail())
}
pub fn match_str<'a>(&'a self)->&'a str{
match self {
Bracket::Leaf(ref s)=>s.as_ref(),
_=>"",
}
}
}
impl Display for Bracket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Bracket::Branch(ref v)=>{
let mut gap = "";
for b in v {
let res = match b {
Bracket::Branch(_)=>write!(f,"{}[{}]",gap,b),
_=>write!(f,"{}{}",gap,b),
};
if res.is_err(){
return res;
}
gap = " ";
}
Ok(())
},
Bracket::Leaf(s)=>{
//TODO handle Escapes
write!(f,"\"{}\"",s)
},
_=>{ write!(f,"--EMPTY--") },
}
}
}
#[cfg(test)]
mod tests {
use super::{Bracket,br,lf};
use std::str::FromStr;
#[test]
fn spaces() {
let b1 = Bracket::from_str("matt dave (andy steve)").unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave").sib(
br().sib_lf("andy").sib_lf("steve")
);
let b2 = Bracket::from_str("matt dave( andy steve)").unwrap();
let b3 = Bracket::from_str(" matt dave ( andy steve ) ").unwrap();
assert_eq!(b1,c1);
assert_eq!(b1,b2);
assert_eq!(b1,b3);
}
#[test]
fn empty_parent(){
let b1 = Bracket::from_str("matt () dave").unwrap();
let c1 = br().sib_lf("matt").sib(br()).sib_lf("dave");
assert_eq!(b1,c1);
}
#[test]
fn many_parent(){
let b1 = Bracket::from_str("matt ({[() ()]})").unwrap();
let c1 = lf("matt")
.sib(
br().sib(
br().sib(
| {
Bracket::Leaf(s.to_string())
} | identifier_body |
lib.rs | Build method
//! let basic1 = Branch(vec![Leaf("hello".to_string()),
//! Branch(vec![Leaf("peter".to_string()),
//! Leaf("dave".to_string())])]);
//!
//! //Chaining Build method
//! let chain1 = br().sib_lf("hello")
//! .sib(br().sib_lf("peter").sib_lf("dave"));
//!
//! assert_eq!(str1,basic1);
//! assert_eq!(str1,chain1);
//! ```
//!
//! It can also handle string input with escapes. Quotes are removed and the string item is
//! considered a single Leaf value;
//!
//! ```
//! use bracket_parse::{Bracket,br,lf};
//! use std::str::FromStr;
//!
//! let bk = Bracket::from_str(r#""hello" 'matt"' "and \"friends\"""#).unwrap();
//! let chn = br().sib_lf("hello").sib_lf("matt\"").sib_lf("and \"friends\"");
//! assert_eq!(bk,chn);
//!
//! ```
use std::str::FromStr;
use std::fmt;
use std::fmt::Display;
use std::iter::IntoIterator;
pub mod tail;
pub use tail::{Tail};
use tail::EMPTY_BRACKET;
pub mod iter;
pub use iter::*;
#[derive(PartialEq,Debug)]
pub enum Bracket{
Branch(Vec<Bracket>),
Leaf(String),
Empty,
}
pub fn lf(s:&str)->Bracket{
Bracket::Leaf(s.to_string())
}
pub fn br()->Bracket{
Bracket::Branch(Vec::new())
}
impl FromStr for Bracket{
type Err = String;
fn from_str(s:&str)->Result<Bracket,String>{
let mut res = Bracket::Empty;
let mut it = s.chars();
let mut curr = String::new();
while let Some(c) = it.next() {
Bracket::match_char(c,&mut it,&mut curr,&mut res)?;
}
if curr.len() >0 {
res.add_sib_str(curr);
}
Ok(res)
}
}
impl<'a>IntoIterator for &'a Bracket{
type Item = &'a Bracket;
type IntoIter = BracketIter<'a>;
fn into_iter(self)->Self::IntoIter{
BracketIter::new(self)
}
}
impl Bracket{
fn add_sib_str(&mut self,s:String){
if s.len() == 0 {
return
}
self.add_sibling(Bracket::Leaf(s));
}
/// Chaining method for quickly creating a tree. Adds a sibling to a bracket;
/// if it is a leaf, makes it a parent first.
pub fn sib(mut self,s:Self)->Self{
self.add_sibling(s);
self
}
/// Chaining method for easily adding a leaf as a sibling from an &str
pub fn sib_lf(self,s:&str)->Self{
self.sib(lf(s))
}
fn add_sibling(&mut self,s:Bracket){
if s == Bracket::Empty {
return
}
let c:String = match self {
Bracket::Branch(ref mut v)=>{
v.push(s);
return
}
Bracket::Empty=>{
*self = s;
return
}
Bracket::Leaf(content)=>content.to_string(),
};
*self = Bracket::Branch(vec![Bracket::Leaf(c),s]);
}
fn match_char<I>(c:char,it:&mut I,curr:&mut String,res:&mut Bracket)->Result<(),String>
where I:Iterator<Item=char>{
match c {
'('=>{ // when non-lexical lifetimes land, these curr.clone()s can hopefully be removed
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,')')?);
},
'{'=>{ // TODO: make JSON-esque; probably needs an Object variant
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,'}')?);
},
'['=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,']')?);
},
'"'|'\''=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_quotes(it,c)?);
}
' '|','=>{
res.add_sib_str(curr.clone());
*curr = String::new();
},
other=>curr.push(other),
}
Ok(())
}
fn from_bracket<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut res = Bracket::Branch(Vec::new());
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
res.add_sib_str(curr.clone());
return Ok(res);
}
Bracket::match_char(c,it,&mut curr,&mut res)?;
}
Err(format!("Close Delim '{}' not found",delim))
}
fn from_quotes<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
return Ok(Bracket::Leaf(curr));
}
match c {
'\\'=>{
match it.next(){
Some(c2)=>{
curr.push(c2);
continue
},
None=>return Err("Escape before end of string".to_string()),
}
},
_=> curr.push(c),
}
}
Err(format!("Close Delim '{}' not found",delim))
}
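// head/tail give Lisp-style access to a Branch: head is the first child (or
// the shared EMPTY_BRACKET), tail is everything after it as a Tail slice.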
pub fn head<'a>(&'a self)->&'a Bracket{
match self{
Bracket::Branch(v)=>match v.len(){
0 => &EMPTY_BRACKET,
_ => &v[0],
}
_ => &EMPTY_BRACKET,
}
}
pub fn tail<'a>(&'a self)->Tail<'a>{
match self{
Bracket::Branch(v)=>match v.len(){
0|1 =>Tail::Empty,
_=>Tail::Rest(&v[1..]),
}
_=>Tail::Empty,
}
}
pub fn tail_n<'a>(&'a self,n:usize)->Tail<'a>{
match self{
Bracket::Branch(v)=>{
if v.len() <= n {
return Tail::Empty;
}
Tail::Rest(&v[n..])
}
_=>Tail::Empty,
}
}
pub fn tail_h<'a>(&'a self, n:usize)->&'a Bracket{
match self{
Bracket::Branch(v)=>{
if v.len() <= n{
return &EMPTY_BRACKET;
}
&v[n]
}
_=>&EMPTY_BRACKET,
}
}
pub fn head_tail<'a>(&'a self)->(&'a Bracket,Tail<'a>){
(self.head(),self.tail())
}
pub fn match_str<'a>(&'a self)->&'a str{
match self {
Bracket::Leaf(ref s)=>s.as_ref(),
_=>"",
}
}
}
impl Display for Bracket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Bracket::Branch(ref v)=>{
let mut gap = "";
for b in v {
let res = match b {
Bracket::Branch(_)=>write!(f,"{}[{}]",gap,b),
_=>write!(f,"{}{}",gap,b),
};
if res.is_err(){
return res;
}
gap = " ";
}
Ok(())
},
Bracket::Leaf(s)=>{
//TODO handle Escapes
write!(f,"\"{}\"",s)
},
_=>{ write!(f,"--EMPTY--") },
}
}
}
#[cfg(test)]
mod tests {
use super::{Bracket,br,lf};
use std::str::FromStr;
#[test]
fn | () {
let b1 = Bracket::from_str("matt dave (andy steve)").unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave").sib(
br().sib_lf("andy").sib_lf("steve")
);
let b2 = Bracket::from_str("matt dave( andy steve)").unwrap();
let b3 = Bracket::from_str(" matt dave ( andy steve ) ").unwrap();
assert_eq!(b1,c1);
assert_eq!(b1,b2);
assert_eq!(b1,b3);
}
#[test]
fn empty_parent(){
let b1 = Bracket::from_str("matt () dave").unwrap();
let c1 = br().sib_lf("matt").sib(br()).sib_lf("dave");
assert_eq!(b1,c1);
}
#[test]
fn many_parent(){
let b1 = Bracket::from_str("matt ({[() ()]})").unwrap();
let c1 = lf("matt")
.sib(
br().sib(
br().sib(
| spaces | identifier_name |
update.js | .log(`Found versions: \n${versions.map(version => ` ${version}`).join('\n')}\n`);
const pinToVersion = await cli_ux_1.default.prompt('Enter a version to update to');
if (!versions.includes(pinToVersion))
throw new Error(`Version ${pinToVersion} not found in the locally installed versions.`);
if (!await fs.pathExists(path.join(this.clientRoot, pinToVersion))) {
throw new Error(`Version ${pinToVersion} is not already installed at ${this.clientRoot}.`);
}
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
this.debug(`switching to existing version ${pinToVersion}`);
await this.updateToExistingVersion(pinToVersion);
this.log();
this.log(`Updating to an already installed version will not update the channel. If autoupdate is enabled, the CLI will eventually be updated back to ${this.channel}.`);
}
else {
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
await this.config.runHook('preupdate', { channel: this.channel });
const manifest = await this.fetchManifest();
this.currentVersion = await this.determineCurrentVersion();
this.updatedVersion = manifest.sha ? `${manifest.version}-${manifest.sha}` : manifest.version;
const reason = await this.skipUpdate();
if (reason)
cli_ux_1.default.action.stop(reason || 'done');
else
await this.update(manifest);
this.debug('tidy');
await this.tidy();
await this.config.runHook('update', { channel: this.channel });
}
this.debug('done');
cli_ux_1.default.action.stop();
}
async fetchManifest() {
const http = require('http-call').HTTP;
cli_ux_1.default.action.status = 'fetching manifest';
if (!this.config.scopedEnvVarTrue('USE_LEGACY_UPDATE')) {
try {
const newManifestUrl = this.config.s3Url(this.s3ChannelManifestKey(this.config.bin, this.config.platform, this.config.arch, this.config.pjson.oclif.update.s3.folder));
const { body } = await http.get(newManifestUrl);
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
this.debug(error.message);
}
}
try {
const url = this.config.s3Url(this.config.s3Key('manifest', {
channel: this.channel,
platform: this.config.platform,
arch: this.config.arch,
}));
const { body } = await http.get(url);
// in case the content-type is not set, parse as a string
// this will happen if uploading without `oclif-dev publish`
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
if (error.statusCode === 403)
throw new Error(`HTTP 403: Invalid channel ${this.channel}`);
throw error;
}
}
async downloadAndExtract(output, manifest, channel) {
const { version } = manifest;
const filesize = (n) => {
const [num, suffix] = require('filesize')(n, { output: 'array' });
return num.toFixed(1) + ` ${suffix}`;
};
const http = require('http-call').HTTP;
const gzUrl = manifest.gz || this.config.s3Url(this.config.s3Key('versioned', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
ext: 'gz',
}));
const { response: stream } = await http.stream(gzUrl);
stream.pause();
const baseDir = manifest.baseDir || this.config.s3Key('baseDir', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
});
const extraction = tar_1.extract(stream, baseDir, output, manifest.sha256gz);
// to-do: use cli.action.type
if (cli_ux_1.default.action.frames) {
// if spinner action
const total = parseInt(stream.headers['content-length'], 10);
let current = 0;
const updateStatus = _.throttle((newStatus) => {
cli_ux_1.default.action.status = newStatus;
}, 250, { leading: true, trailing: false });
stream.on('data', data => {
current += data.length;
updateStatus(`${filesize(current)}/${filesize(total)}`);
});
}
stream.resume();
await extraction;
}
async update(manifest, channel = 'stable') {
const { channel: manifestChannel } = manifest;
if (manifestChannel)
channel = manifestChannel;
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI from ${color_1.default.green(this.currentVersion)} to ${color_1.default.green(this.updatedVersion)}${channel === 'stable' ? '' : ' (' + color_1.default.yellow(channel) + ')'}`);
await this.ensureClientDir();
const output = path.join(this.clientRoot, this.updatedVersion);
if (!await fs.pathExists(output)) {
await this.downloadAndExtract(output, manifest, channel);
}
await this.setChannel();
await this.createBin(this.updatedVersion);
await this.touch();
await this.reexec();
}
async updateToExistingVersion(version) {
await this.createBin(version);
await this.touch();
}
async skipUpdate() {
if (!this.config.binPath) {
const instructions = this.config.scopedEnvVar('UPDATE_INSTRUCTIONS');
if (instructions)
this.warn(instructions);
return 'not updatable';
}
if (this.currentVersion === this.updatedVersion) {
if (this.config.scopedEnvVar('HIDE_UPDATED_MESSAGE'))
return 'done';
return `already on latest version: ${this.currentVersion}`;
}
return false;
}
async determineChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
if (fs.existsSync(channelPath)) {
const channel = await fs.readFile(channelPath, 'utf8');
return String(channel).trim();
}
return this.config.channel || 'stable';
}
async determineCurrentVersion() {
try {
const currentVersion = await fs.readFile(this.clientBin, 'utf8');
const matches = currentVersion.match(/\.\.[/|\\](.+)[/|\\]bin/);
return matches ? matches[1] : this.config.version;
}
catch (error) {
this.debug(error);
}
return this.config.version;
}
s3ChannelManifestKey(bin, platform, arch, folder) {
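// e.g. folder 'cli' + channel 'stable' -> 'cli/channels/stable/mycli-darwin-x64-buildmanifest'
// (illustrative bin/platform/arch values)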
let s3SubDir = folder || '';
if (s3SubDir !== '' && s3SubDir.slice(-1) !== '/')
s3SubDir = `${s3SubDir}/`;
return path.join(s3SubDir, 'channels', this.channel, `${bin}-${platform}-${arch}-buildmanifest`);
}
async setChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
await fs.writeFile(channelPath, this.channel, 'utf8');
}
async logChop() {
try {
this.debug('log chop');
const logChopper = require('log-chopper').default;
await logChopper.chop(this.config.errlog);
}
catch (error) {
this.debug(error.message);
}
}
async mtime(f) {
const { mtime } = await fs.stat(f);
return mtime;
}
// when autoupdating, wait until the CLI isn't active
async debounce() |
// removes any unused CLIs
async tidy() {
try {
const root = this.clientRoot;
if (!await fs.pathExists(root))
return;
const files = await util_1.ls(root);
const promises = files.map(async (f) => {
if (['bin', 'current', this.config.version].includes(path.basename(f.path)))
return;
const mtime = f.stat.mtime;
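// keep a version for 42 days (42 * 24 hours) after its last use, then remove it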
mtime.setHours(mtime.getHours() + (42 * 24));
if (mtime < new Date()) {
await fs.remove(f.path);
}
});
for (const p of promises)
await p; // eslint-disable-line no-await-in-loop
await this.logChop();
}
catch (error) {
cli_ux_1.default.warn(error);
}
}
async touch() {
// touch the client so it won't be tidied up right away
try {
const p = path.join | {
let output = false;
const lastrunfile = path.join(this.config.cacheDir, 'lastrun');
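// 'lastrun' is assumed to be touched on each CLI invocation; if its mtime is
// within the last hour, treat the CLI as active and poll again in a minute.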
const m = await this.mtime(lastrunfile);
m.setHours(m.getHours() + 1);
if (m > new Date()) {
const msg = `waiting until ${m.toISOString()} to update`;
if (output) {
this.debug(msg);
}
else {
await cli_ux_1.default.log(msg);
output = true;
}
await util_1.wait(60 * 1000); // wait 1 minute
return this.debounce();
}
cli_ux_1.default.log('time to update');
} | identifier_body |
update.js | .log(`Found versions: \n${versions.map(version => ` ${version}`).join('\n')}\n`);
const pinToVersion = await cli_ux_1.default.prompt('Enter a version to update to');
if (!versions.includes(pinToVersion))
throw new Error(`Version ${pinToVersion} not found in the locally installed versions.`);
if (!await fs.pathExists(path.join(this.clientRoot, pinToVersion))) {
throw new Error(`Version ${pinToVersion} is not already installed at ${this.clientRoot}.`);
}
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
this.debug(`switching to existing version ${pinToVersion}`);
await this.updateToExistingVersion(pinToVersion);
this.log();
this.log(`Updating to an already installed version will not update the channel. If autoupdate is enabled, the CLI will eventually be updated back to ${this.channel}.`);
}
else {
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
await this.config.runHook('preupdate', { channel: this.channel });
const manifest = await this.fetchManifest();
this.currentVersion = await this.determineCurrentVersion();
this.updatedVersion = manifest.sha ? `${manifest.version}-${manifest.sha}` : manifest.version;
const reason = await this.skipUpdate();
if (reason)
cli_ux_1.default.action.stop(reason || 'done');
else
await this.update(manifest);
this.debug('tidy');
await this.tidy();
await this.config.runHook('update', { channel: this.channel });
}
this.debug('done');
cli_ux_1.default.action.stop();
}
async fetchManifest() {
const http = require('http-call').HTTP;
cli_ux_1.default.action.status = 'fetching manifest';
if (!this.config.scopedEnvVarTrue('USE_LEGACY_UPDATE')) {
try {
const newManifestUrl = this.config.s3Url(this.s3ChannelManifestKey(this.config.bin, this.config.platform, this.config.arch, this.config.pjson.oclif.update.s3.folder));
const { body } = await http.get(newManifestUrl);
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
this.debug(error.message);
}
}
try {
const url = this.config.s3Url(this.config.s3Key('manifest', {
channel: this.channel,
platform: this.config.platform,
arch: this.config.arch,
}));
const { body } = await http.get(url);
// in case the content-type is not set, parse as a string
// this will happen if uploading without `oclif-dev publish`
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
if (error.statusCode === 403)
throw new Error(`HTTP 403: Invalid channel ${this.channel}`);
throw error;
}
}
async downloadAndExtract(output, manifest, channel) {
const { version } = manifest;
const filesize = (n) => {
const [num, suffix] = require('filesize')(n, { output: 'array' });
return num.toFixed(1) + ` ${suffix}`;
};
const http = require('http-call').HTTP;
const gzUrl = manifest.gz || this.config.s3Url(this.config.s3Key('versioned', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
ext: 'gz',
}));
const { response: stream } = await http.stream(gzUrl);
stream.pause();
const baseDir = manifest.baseDir || this.config.s3Key('baseDir', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
});
const extraction = tar_1.extract(stream, baseDir, output, manifest.sha256gz);
// to-do: use cli.action.type
if (cli_ux_1.default.action.frames) {
// if spinner action
const total = parseInt(stream.headers['content-length'], 10);
let current = 0;
const updateStatus = _.throttle((newStatus) => {
cli_ux_1.default.action.status = newStatus;
}, 250, { leading: true, trailing: false });
stream.on('data', data => {
current += data.length;
updateStatus(`${filesize(current)}/${filesize(total)}`);
});
}
stream.resume();
await extraction;
}
async update(manifest, channel = 'stable') {
const { channel: manifestChannel } = manifest;
if (manifestChannel)
channel = manifestChannel;
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI from ${color_1.default.green(this.currentVersion)} to ${color_1.default.green(this.updatedVersion)}${channel === 'stable' ? '' : ' (' + color_1.default.yellow(channel) + ')'}`);
await this.ensureClientDir();
const output = path.join(this.clientRoot, this.updatedVersion);
if (!await fs.pathExists(output)) {
await this.downloadAndExtract(output, manifest, channel);
}
await this.setChannel();
await this.createBin(this.updatedVersion);
await this.touch();
await this.reexec();
}
async updateToExistingVersion(version) {
await this.createBin(version);
await this.touch();
}
async skipUpdate() {
if (!this.config.binPath) {
const instructions = this.config.scopedEnvVar('UPDATE_INSTRUCTIONS');
if (instructions)
this.warn(instructions);
return 'not updatable';
}
if (this.currentVersion === this.updatedVersion) {
if (this.config.scopedEnvVar('HIDE_UPDATED_MESSAGE'))
return 'done';
return `already on latest version: ${this.currentVersion}`;
}
return false;
}
async determineChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
if (fs.existsSync(channelPath)) {
const channel = await fs.readFile(channelPath, 'utf8');
return String(channel).trim();
}
return this.config.channel || 'stable';
}
async determineCurrentVersion() {
try {
const currentVersion = await fs.readFile(this.clientBin, 'utf8');
const matches = currentVersion.match(/\.\.[/|\\](.+)[/|\\]bin/);
return matches ? matches[1] : this.config.version;
}
catch (error) {
this.debug(error);
}
return this.config.version;
}
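// Builds the channel manifest key; for an illustrative bin "mycli" on the stable
// channel with no folder it would yield "channels/stable/mycli-linux-x64-buildmanifest".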
s3ChannelManifestKey(bin, platform, arch, folder) {
let s3SubDir = folder || '';
if (s3SubDir !== '' && s3SubDir.slice(-1) !== '/')
s3SubDir = `${s3SubDir}/`;
return path.join(s3SubDir, 'channels', this.channel, `${bin}-${platform}-${arch}-buildmanifest`);
}
async setChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
await fs.writeFile(channelPath, this.channel, 'utf8');
}
async logChop() {
try {
this.debug('log chop');
const logChopper = require('log-chopper').default;
await logChopper.chop(this.config.errlog);
}
catch (error) {
this.debug(error.message);
}
}
async mtime(f) {
const { mtime } = await fs.stat(f);
return mtime;
}
// when autoupdating, wait until the CLI isn't active
async | () {
let output = false;
const lastrunfile = path.join(this.config.cacheDir, 'lastrun');
const m = await this.mtime(lastrunfile);
m.setHours(m.getHours() + 1);
if (m > new Date()) {
const msg = `waiting until ${m.toISOString()} to update`;
if (output) {
this.debug(msg);
}
else {
await cli_ux_1.default.log(msg);
output = true;
}
await util_1.wait(60 * 1000); // wait 1 minute
return this.debounce();
}
cli_ux_1.default.log('time to update');
}
// removes any unused CLIs
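// Anything except bin, current and the running version is removed once its
// mtime is older than 42 days (42 * 24 hours).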
async tidy() {
try {
const root = this.clientRoot;
if (!await fs.pathExists(root))
return;
const files = await util_1.ls(root);
const promises = files.map(async (f) => {
if (['bin', 'current', this.config.version].includes(path.basename(f.path)))
return;
const mtime = f.stat.mtime;
mtime.setHours(mtime.getHours() + (42 * 24));
if (mtime < new Date()) {
await fs.remove(f.path);
}
});
for (const p of promises)
await p; // eslint-disable-line no-await-in-loop
await this.logChop();
}
catch (error) {
cli_ux_1.default.warn(error);
}
}
async touch() {
// touch the client so it won't be tidied up right away
try {
const p = path.join | debounce | identifier_name |
update.js | this.log(`Found versions: \n${versions.map(version => ` ${version}`).join('\n')}\n`);
const pinToVersion = await cli_ux_1.default.prompt('Enter a version to update to');
if (!versions.includes(pinToVersion))
throw new Error(`Version ${pinToVersion} not found in the locally installed versions.`);
if (!await fs.pathExists(path.join(this.clientRoot, pinToVersion))) {
throw new Error(`Version ${pinToVersion} is not already installed at ${this.clientRoot}.`);
}
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
this.debug(`switching to existing version ${pinToVersion}`);
await this.updateToExistingVersion(pinToVersion);
this.log();
this.log(`Updating to an already installed version will not update the channel. If autoupdate is enabled, the CLI will eventually be updated back to ${this.channel}.`);
}
else {
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
await this.config.runHook('preupdate', { channel: this.channel });
const manifest = await this.fetchManifest();
this.currentVersion = await this.determineCurrentVersion();
this.updatedVersion = manifest.sha ? `${manifest.version}-${manifest.sha}` : manifest.version;
const reason = await this.skipUpdate();
if (reason)
cli_ux_1.default.action.stop(reason || 'done');
else
await this.update(manifest);
this.debug('tidy');
await this.tidy();
await this.config.runHook('update', { channel: this.channel });
}
this.debug('done');
cli_ux_1.default.action.stop();
}
async fetchManifest() {
const http = require('http-call').HTTP;
cli_ux_1.default.action.status = 'fetching manifest';
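// Try the newer channel manifest location first; fall back to the legacy
// s3Key('manifest', ...) layout when that fetch fails or USE_LEGACY_UPDATE is set.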
if (!this.config.scopedEnvVarTrue('USE_LEGACY_UPDATE')) {
try {
const newManifestUrl = this.config.s3Url(this.s3ChannelManifestKey(this.config.bin, this.config.platform, this.config.arch, this.config.pjson.oclif.update.s3.folder));
const { body } = await http.get(newManifestUrl);
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
this.debug(error.message);
}
}
try {
const url = this.config.s3Url(this.config.s3Key('manifest', {
channel: this.channel,
platform: this.config.platform,
arch: this.config.arch,
}));
const { body } = await http.get(url);
// in case the content-type is not set, parse as a string
// this will happen if uploading without `oclif-dev publish`
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
if (error.statusCode === 403)
throw new Error(`HTTP 403: Invalid channel ${this.channel}`);
throw error;
}
}
async downloadAndExtract(output, manifest, channel) {
const { version } = manifest;
const filesize = (n) => {
const [num, suffix] = require('filesize')(n, { output: 'array' });
return num.toFixed(1) + ` ${suffix}`;
};
const http = require('http-call').HTTP;
const gzUrl = manifest.gz || this.config.s3Url(this.config.s3Key('versioned', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
ext: 'gz',
}));
const { response: stream } = await http.stream(gzUrl);
stream.pause();
const baseDir = manifest.baseDir || this.config.s3Key('baseDir', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
});
const extraction = tar_1.extract(stream, baseDir, output, manifest.sha256gz);
// to-do: use cli.action.type
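// Progress is only rendered for spinner-style actions, and byte counts are
// throttled to one status update per 250ms.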
if (cli_ux_1.default.action.frames) {
// if spinner action
const total = parseInt(stream.headers['content-length'], 10);
let current = 0;
const updateStatus = _.throttle((newStatus) => {
cli_ux_1.default.action.status = newStatus;
}, 250, { leading: true, trailing: false });
stream.on('data', data => {
current += data.length;
updateStatus(`${filesize(current)}/${filesize(total)}`);
});
}
stream.resume();
await extraction;
}
async update(manifest, channel = 'stable') {
const { channel: manifestChannel } = manifest;
if (manifestChannel)
channel = manifestChannel;
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI from ${color_1.default.green(this.currentVersion)} to ${color_1.default.green(this.updatedVersion)}${channel === 'stable' ? '' : ' (' + color_1.default.yellow(channel) + ')'}`);
await this.ensureClientDir();
const output = path.join(this.clientRoot, this.updatedVersion);
if (!await fs.pathExists(output)) {
await this.downloadAndExtract(output, manifest, channel);
}
await this.setChannel();
await this.createBin(this.updatedVersion);
await this.touch();
await this.reexec();
}
async updateToExistingVersion(version) { | const instructions = this.config.scopedEnvVar('UPDATE_INSTRUCTIONS');
if (instructions)
this.warn(instructions);
return 'not updatable';
}
if (this.currentVersion === this.updatedVersion) {
if (this.config.scopedEnvVar('HIDE_UPDATED_MESSAGE'))
return 'done';
return `already on latest version: ${this.currentVersion}`;
}
return false;
}
async determineChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
if (fs.existsSync(channelPath)) {
const channel = await fs.readFile(channelPath, 'utf8');
return String(channel).trim();
}
return this.config.channel || 'stable';
}
async determineCurrentVersion() {
try {
const currentVersion = await fs.readFile(this.clientBin, 'utf8');
const matches = currentVersion.match(/\.\.[/|\\](.+)[/|\\]bin/);
return matches ? matches[1] : this.config.version;
}
catch (error) {
this.debug(error);
}
return this.config.version;
}
s3ChannelManifestKey(bin, platform, arch, folder) {
let s3SubDir = folder || '';
if (s3SubDir !== '' && s3SubDir.slice(-1) !== '/')
s3SubDir = `${s3SubDir}/`;
return path.join(s3SubDir, 'channels', this.channel, `${bin}-${platform}-${arch}-buildmanifest`);
}
async setChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
await fs.writeFile(channelPath, this.channel, 'utf8');
}
async logChop() {
try {
this.debug('log chop');
const logChopper = require('log-chopper').default;
await logChopper.chop(this.config.errlog);
}
catch (error) {
this.debug(error.message);
}
}
async mtime(f) {
const { mtime } = await fs.stat(f);
return mtime;
}
// when autoupdating, wait until the CLI isn't active
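// Sketch of the flow: while the lastrun file was modified within the past hour,
// print a wait notice, sleep a minute, and re-check until the hour has elapsed.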
async debounce() {
let output = false;
const lastrunfile = path.join(this.config.cacheDir, 'lastrun');
const m = await this.mtime(lastrunfile);
m.setHours(m.getHours() + 1);
if (m > new Date()) {
const msg = `waiting until ${m.toISOString()} to update`;
if (output) {
this.debug(msg);
}
else {
await cli_ux_1.default.log(msg);
output = true;
}
await util_1.wait(60 * 1000); // wait 1 minute
return this.debounce();
}
cli_ux_1.default.log('time to update');
}
// removes any unused CLIs
async tidy() {
try {
const root = this.clientRoot;
if (!await fs.pathExists(root))
return;
const files = await util_1.ls(root);
const promises = files.map(async (f) => {
if (['bin', 'current', this.config.version].includes(path.basename(f.path)))
return;
const mtime = f.stat.mtime;
mtime.setHours(mtime.getHours() + (42 * 24));
if (mtime < new Date()) {
await fs.remove(f.path);
}
});
for (const p of promises)
await p; // eslint-disable-line no-await-in-loop
await this.logChop();
}
catch (error) {
cli_ux_1.default.warn(error);
}
}
async touch() {
// touch the client so it won't be tidied up right away
try {
const p = path.join(this | await this.createBin(version);
await this.touch();
}
async skipUpdate() {
if (!this.config.binPath) { | random_line_split |
update.js | .log(`Found versions: \n${versions.map(version => ` ${version}`).join('\n')}\n`);
const pinToVersion = await cli_ux_1.default.prompt('Enter a version to update to');
if (!versions.includes(pinToVersion))
throw new Error(`Version ${pinToVersion} not found in the locally installed versions.`);
if (!await fs.pathExists(path.join(this.clientRoot, pinToVersion))) {
throw new Error(`Version ${pinToVersion} is not already installed at ${this.clientRoot}.`);
}
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
this.debug(`switching to existing version ${pinToVersion}`);
await this.updateToExistingVersion(pinToVersion);
this.log();
this.log(`Updating to an already installed version will not update the channel. If autoupdate is enabled, the CLI will eventually be updated back to ${this.channel}.`);
}
else {
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI`);
await this.config.runHook('preupdate', { channel: this.channel });
const manifest = await this.fetchManifest();
this.currentVersion = await this.determineCurrentVersion();
this.updatedVersion = manifest.sha ? `${manifest.version}-${manifest.sha}` : manifest.version;
const reason = await this.skipUpdate();
if (reason)
cli_ux_1.default.action.stop(reason || 'done');
else
await this.update(manifest);
this.debug('tidy');
await this.tidy();
await this.config.runHook('update', { channel: this.channel });
}
this.debug('done');
cli_ux_1.default.action.stop();
}
async fetchManifest() {
const http = require('http-call').HTTP;
cli_ux_1.default.action.status = 'fetching manifest';
if (!this.config.scopedEnvVarTrue('USE_LEGACY_UPDATE')) {
try {
const newManifestUrl = this.config.s3Url(this.s3ChannelManifestKey(this.config.bin, this.config.platform, this.config.arch, this.config.pjson.oclif.update.s3.folder));
const { body } = await http.get(newManifestUrl);
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
this.debug(error.message);
}
}
try {
const url = this.config.s3Url(this.config.s3Key('manifest', {
channel: this.channel,
platform: this.config.platform,
arch: this.config.arch,
}));
const { body } = await http.get(url);
// in case the content-type is not set, parse as a string
// this will happen if uploading without `oclif-dev publish`
if (typeof body === 'string') {
return JSON.parse(body);
}
return body;
}
catch (error) {
if (error.statusCode === 403)
throw new Error(`HTTP 403: Invalid channel ${this.channel}`);
throw error;
}
}
async downloadAndExtract(output, manifest, channel) {
const { version } = manifest;
const filesize = (n) => {
const [num, suffix] = require('filesize')(n, { output: 'array' });
return num.toFixed(1) + ` ${suffix}`;
};
const http = require('http-call').HTTP;
const gzUrl = manifest.gz || this.config.s3Url(this.config.s3Key('versioned', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
ext: 'gz',
}));
const { response: stream } = await http.stream(gzUrl);
stream.pause();
const baseDir = manifest.baseDir || this.config.s3Key('baseDir', {
version,
channel,
bin: this.config.bin,
platform: this.config.platform,
arch: this.config.arch,
});
const extraction = tar_1.extract(stream, baseDir, output, manifest.sha256gz);
// to-do: use cli.action.type
if (cli_ux_1.default.action.frames) {
// if spinner action
const total = parseInt(stream.headers['content-length'], 10);
let current = 0;
const updateStatus = _.throttle((newStatus) => {
cli_ux_1.default.action.status = newStatus;
}, 250, { leading: true, trailing: false });
stream.on('data', data => {
current += data.length;
updateStatus(`${filesize(current)}/${filesize(total)}`);
});
}
stream.resume();
await extraction;
}
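// update() flow: download and extract unless this version is already cached,
// persist the channel, recreate the client bin for the new version (createBin,
// defined elsewhere), touch the install, then re-exec into the new version.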
async update(manifest, channel = 'stable') {
const { channel: manifestChannel } = manifest;
if (manifestChannel)
channel = manifestChannel;
cli_ux_1.default.action.start(`${this.config.name}: Updating CLI from ${color_1.default.green(this.currentVersion)} to ${color_1.default.green(this.updatedVersion)}${channel === 'stable' ? '' : ' (' + color_1.default.yellow(channel) + ')'}`);
await this.ensureClientDir();
const output = path.join(this.clientRoot, this.updatedVersion);
if (!await fs.pathExists(output)) {
await this.downloadAndExtract(output, manifest, channel);
}
await this.setChannel();
await this.createBin(this.updatedVersion);
await this.touch();
await this.reexec();
}
async updateToExistingVersion(version) {
await this.createBin(version);
await this.touch();
}
async skipUpdate() {
if (!this.config.binPath) {
const instructions = this.config.scopedEnvVar('UPDATE_INSTRUCTIONS');
if (instructions)
this.warn(instructions);
return 'not updatable';
}
if (this.currentVersion === this.updatedVersion) {
if (this.config.scopedEnvVar('HIDE_UPDATED_MESSAGE'))
return 'done';
return `already on latest version: ${this.currentVersion}`;
}
return false;
}
async determineChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
if (fs.existsSync(channelPath)) |
return this.config.channel || 'stable';
}
async determineCurrentVersion() {
try {
const currentVersion = await fs.readFile(this.clientBin, 'utf8');
const matches = currentVersion.match(/\.\.[/|\\](.+)[/|\\]bin/);
return matches ? matches[1] : this.config.version;
}
catch (error) {
this.debug(error);
}
return this.config.version;
}
s3ChannelManifestKey(bin, platform, arch, folder) {
let s3SubDir = folder || '';
if (s3SubDir !== '' && s3SubDir.slice(-1) !== '/')
s3SubDir = `${s3SubDir}/`;
return path.join(s3SubDir, 'channels', this.channel, `${bin}-${platform}-${arch}-buildmanifest`);
}
async setChannel() {
const channelPath = path.join(this.config.dataDir, 'channel');
await fs.writeFile(channelPath, this.channel, 'utf8');
}
async logChop() {
try {
this.debug('log chop');
const logChopper = require('log-chopper').default;
await logChopper.chop(this.config.errlog);
}
catch (error) {
this.debug(error.message);
}
}
async mtime(f) {
const { mtime } = await fs.stat(f);
return mtime;
}
// when autoupdating, wait until the CLI isn't active
async debounce() {
let output = false;
const lastrunfile = path.join(this.config.cacheDir, 'lastrun');
const m = await this.mtime(lastrunfile);
m.setHours(m.getHours() + 1);
if (m > new Date()) {
const msg = `waiting until ${m.toISOString()} to update`;
if (output) {
this.debug(msg);
}
else {
await cli_ux_1.default.log(msg);
output = true;
}
await util_1.wait(60 * 1000); // wait 1 minute
return this.debounce();
}
cli_ux_1.default.log('time to update');
}
// removes any unused CLIs
async tidy() {
try {
const root = this.clientRoot;
if (!await fs.pathExists(root))
return;
const files = await util_1.ls(root);
const promises = files.map(async (f) => {
if (['bin', 'current', this.config.version].includes(path.basename(f.path)))
return;
const mtime = f.stat.mtime;
mtime.setHours(mtime.getHours() + (42 * 24));
if (mtime < new Date()) {
await fs.remove(f.path);
}
});
for (const p of promises)
await p; // eslint-disable-line no-await-in-loop
await this.logChop();
}
catch (error) {
cli_ux_1.default.warn(error);
}
}
async touch() {
// touch the client so it won't be tidied up right away
try {
const p = path.join | {
const channel = await fs.readFile(channelPath, 'utf8');
return String(channel).trim();
} | conditional_block |
rnn.py | set and ground truth label tensors
:param options: (hyper)parameters of the neural network model. See method unpack_options for details on the
full list of configurable options
"""
self.training_data = np.array(training_data, dtype=np.float32)
self.training_label = np.array(training_label, dtype=np.float32)
self.test_data = np.array(test_data, dtype=np.float32)
self.test_label = np.array(test_label, dtype=np.float32)
# Sanity checks
if self.training_data.shape[0] != self.training_label.shape[0]:
raise ValueError("The length of training_data tensor does not match the training_label tensor!")
if self.test_label.shape[0] != self.test_data.shape[0]:
raise ValueError("The length of test_data tensor does not match the test_label tensor!")
self.options = self.unpack_options(**options)
if self.options['input_dimension'] is None:
# Data dimension of a single sample
self.input_dimensions = 1
else:
self.input_dimensions = self.options['input_dimension']
self.graph = None
self.loss = None
self.optimizer = None
self.predict = None
self.tf_labels = None
self.tf_dataset = None
self.learning_rate = None
# Two lists to store the losses and accuracies during training and testing
self.train_losses = []
self.train_accuracies = []
def | (self):
"""
Set up a computation graph for TensorFlow
:return: None
"""
self.graph = tf.Graph()
model_type = self.options['model_type']
optimiser_selected = self.options['optimizer']
with self.graph.as_default():
self.tf_dataset = tf.placeholder(tf.float32,
shape=(None, self.options['num_steps'], self.input_dimensions))
self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))
self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')
# Forward pass
if model_type == 'rnn':
self.predict = self.rnn_model(self.tf_dataset)
elif model_type == 'lstm':
self.predict = self.lstm_model(self.tf_dataset)
else:
raise NotImplementedError("Unimplemented RNN model keyword")
self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))
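# Mean squared error over the batch; an optional L2 weight penalty is added below.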
if self.options['regularisation_coeff'] > 0.:
# Add in L2 penalty for regularisation if required
penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)
for var in tf.trainable_variables())
self.loss += penalty
if self.options['use_customised_optimizer'] is False:
if optimiser_selected == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif optimiser_selected == 'grad':
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
elif optimiser_selected == 'ada':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif optimiser_selected == 'rms':
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
else:
raise NotImplementedError("Unimplemented built-in optimiser keyword.")
else:
self.optimizer = self.options['customized_optimizer']
self.minimise = self.optimizer.minimize(self.loss)
def run(self):
"""
Create a session according to the computation graph and run the model
:return: None
"""
if self.graph is None:
raise ValueError("Create TensorFlow graph before running a session.")
with tf.Session(graph=self.graph) as session:
tf.global_variables_initializer().run()
# Stochastic gradient descent: train the data with a mini-batch each iteration
batch_size = self.options['batch_size']
for epoch_idx in range(self.options['num_epoch']):
training_epoch = self.training_data[epoch_idx]
label_epoch = self.training_label[epoch_idx]
batch_count = training_epoch.shape[0] // batch_size
learning_rate = self.options['learning_rate']
if self.options['learning_rate_decay_coeff'] > 0.:
learning_rate *= self.options['learning_rate_decay_coeff'] ** \
max(float(epoch_idx + 1 - self.options['init_epoch']), 0.0)
for batch in range(batch_count):
try:
batch_data = training_epoch[batch*batch_size:(batch+1)*batch_size, :, :]
batch_labels = label_epoch[batch*batch_size:(batch+1)*batch_size, :]
except KeyError:
batch_data = training_epoch[batch*batch_size:, :, :]
batch_labels = label_epoch[batch*batch_size:, :]
feed_dict = {
self.tf_dataset: batch_data,
self.tf_labels: batch_labels,
self.learning_rate: learning_rate}
p, l, _, = session.run([self.predict, self.loss, self.minimise], feed_dict=feed_dict)
self.train_losses.append(l)
self.train_accuracies.append(self.get_accuracy(batch_labels, p))
# Finally run the data on test data
final_feed_dict = {
self.tf_dataset: self.test_data,
self.tf_labels: self.test_label,
self.learning_rate: 0.,
}
self.predict, final_loss = session.run([self.predict, self.loss], feed_dict=final_feed_dict)
return self.predict
# Implementation of RNN and LSTM models
def rnn_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicRNNCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicRNNCell(num_cells,)
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
def lstm_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicLSTMCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicLSTMCell(num_cells, )
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
# Utility Functions
@staticmethod
def unpack_options(num_cells=24,
learning_rate=1e-3,
learning_rate_decay_coeff=0.,
init_epoch=5,
batch_size=100,
optimizer='rms',
model_type='rnn',
use_customized_optimizer=False,
customized_optimizer=None,
num_layers=1,
regularisation_coeff=0.,
input_dimension=None,
num_steps=30,
num_epoch=1,):
"""
:param num_cells: Number of hidden units per layer in the RNN/LSTM network
:param learning_rate: initial learning rate
:param learning_rate_decay_coeff: the exponentially decaying coefficient of learning rate for each epoch.
:param init_epoch: initial number of epochs where the learning rate will be kept constant. Only relevant if
learning_rate_decay_coeff is a number other than zero.
:param batch_size: batch size
:param optimizer: choice of the chosen optimiser ('rms', 'adam', etc)
:param model_type: 'rnn' or 'lstm'
:param use_customized_optimizer: bool - if True the optimizer object in customized_optimizer
will be used instead.
:param customized_optimizer: optimizer object - if use_customized_optimizer is True, this optimizer will be used
:param num_layers: number of layers of hidden units in the RNN/LSTM
:param regularisation_coeff: regularisation coefficient (a.k.a lambda)
:param input_dimension: input dimension of each data point. For a scalar time series this value is 1
:param num_steps: number of data points of each input sequence
:param num_epoch: number of training epochs
:return:
"""
options = {
'num_cells': num_cells,
'learning_rate': learning_rate,
'learning_rate_decay_coeff': learning_rate_decay_coeff,
'init_epoch': init_epoch,
'batch_size': batch_size,
'optimizer': optimizer,
'model_type': model_type,
'num_layer': num_layers,
'use_customised_optimizer': use_customized_optimizer,
'customized_optimizer': customized_optimizer,
'regularisation_coeff': regularisation_coeff,
"input_dimension": input_dimension,
'num_steps': num_steps | create_graph | identifier_name |
rnn.py | self.tf_dataset = None
self.learning_rate = None
# Two lists to store the losses and accuracies during training and testing
self.train_losses = []
self.train_accuracies = []
def create_graph(self):
"""
Set up a computation graph for TensorFlow
:return: None
"""
self.graph = tf.Graph()
model_type = self.options['model_type']
optimiser_selected = self.options['optimizer']
with self.graph.as_default():
self.tf_dataset = tf.placeholder(tf.float32,
shape=(None, self.options['num_steps'], self.input_dimensions))
self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))
self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')
# Forward pass
if model_type == 'rnn':
self.predict = self.rnn_model(self.tf_dataset)
elif model_type == 'lstm':
self.predict = self.lstm_model(self.tf_dataset)
else:
raise NotImplementedError("Unimplemented RNN model keyword")
self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))
if self.options['regularisation_coeff'] > 0.:
# Add in L2 penalty for regularisation if required
penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)
for var in tf.trainable_variables())
self.loss += penalty
if self.options['use_customised_optimizer'] is False:
if optimiser_selected == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif optimiser_selected == 'grad':
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
elif optimiser_selected == 'ada':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif optimiser_selected == 'rms':
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
else:
raise NotImplementedError("Unimplemented built-in optimiser keyword.")
else:
self.optimizer = self.options['customized_optimizer']
self.minimise = self.optimizer.minimize(self.loss)
def run(self):
"""
Create a session according to the computation graph and run the model
:return: None
"""
if self.graph is None:
raise ValueError("Create TensorFlow graph before running a session.")
with tf.Session(graph=self.graph) as session:
tf.global_variables_initializer().run()
# Stochastic gradient descent: train the data with a mini-batch each iteration
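# Each epoch's tensor is sliced along axis 0; batch b covers rows
# [b*batch_size, (b+1)*batch_size).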
batch_size = self.options['batch_size']
for epoch_idx in range(self.options['num_epoch']):
training_epoch = self.training_data[epoch_idx]
label_epoch = self.training_label[epoch_idx]
batch_count = training_epoch.shape[0] // batch_size
learning_rate = self.options['learning_rate']
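# Exponential decay kicks in after init_epoch epochs: e.g. with coeff 0.9 and
# init_epoch 5, epoch_idx 7 trains at learning_rate * 0.9 ** 3.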
if self.options['learning_rate_decay_coeff'] > 0.:
learning_rate *= self.options['learning_rate_decay_coeff'] ** \
max(float(epoch_idx + 1 - self.options['init_epoch']), 0.0)
for batch in range(batch_count):
try:
batch_data = training_epoch[batch*batch_size:(batch+1)*batch_size, :, :]
batch_labels = label_epoch[batch*batch_size:(batch+1)*batch_size, :]
except KeyError:
batch_data = training_epoch[batch*batch_size:, :, :]
batch_labels = label_epoch[batch*batch_size:, :]
feed_dict = {
self.tf_dataset: batch_data,
self.tf_labels: batch_labels,
self.learning_rate: learning_rate}
p, l, _, = session.run([self.predict, self.loss, self.minimise], feed_dict=feed_dict)
self.train_losses.append(l)
self.train_accuracies.append(self.get_accuracy(batch_labels, p))
# Finally run the data on test data
final_feed_dict = {
self.tf_dataset: self.test_data,
self.tf_labels: self.test_label,
self.learning_rate: 0.,
}
self.predict, final_loss = session.run([self.predict, self.loss], feed_dict=final_feed_dict)
return self.predict
# Implementation of RNN and LSTM models
def rnn_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicRNNCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicRNNCell(num_cells,)
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
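# Affine projection of the last time step's hidden state back to the input dimension.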
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
def lstm_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicLSTMCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicLSTMCell(num_cells, )
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
# Utility Functions
@staticmethod
def unpack_options(num_cells=24,
learning_rate=1e-3,
learning_rate_decay_coeff=0.,
init_epoch=5,
batch_size=100,
optimizer='rms',
model_type='rnn',
use_customized_optimizer=False,
customized_optimizer=None,
num_layers=1,
regularisation_coeff=0.,
input_dimension=None,
num_steps=30,
num_epoch=1,):
"""
:param num_cells: Number of hidden units per layer in the RNN/LSTM network
:param learning_rate: initial learning rate
:param learning_rate_decay_coeff: the exponentially decaying coefficient of learning rate for each epoch.
:param init_epoch: initial number of epochs where the learning rate will be kept constant. Only relevant if
learning_rate_decay_coeff is a number other than zero.
:param batch_size: batch size
:param optimizer: choice of the chosen optimiser ('rms', 'adam', etc)
:param model_type: 'rnn' or 'lstm'
:param use_customized_optimizer: bool - if True the optimizer object in customized_optimizer
will be used instead.
:param customized_optimizer: optimizer object - if use_customized_optimizer is True, this optimizer will be used
:param num_layers: number of layers of hidden units in the RNN/LSTM
:param regularisation_coeff: regularisation coefficient (a.k.a lambda)
:param input_dimension: input dimension of each data point. For a scalar time series this value is 1
:param num_steps: number of data points of each input sequence
:param num_epoch: number of training epochs
:return:
"""
options = {
'num_cells': num_cells,
'learning_rate': learning_rate,
'learning_rate_decay_coeff': learning_rate_decay_coeff,
'init_epoch': init_epoch,
'batch_size': batch_size,
'optimizer': optimizer,
'model_type': model_type,
'num_layer': num_layers,
'use_customised_optimizer': use_customized_optimizer,
'customized_optimizer': customized_optimizer,
'regularisation_coeff': regularisation_coeff,
"input_dimension": input_dimension,
'num_steps': num_steps,
'num_epoch': num_epoch
}
return options
@staticmethod
def get_accuracy(label, predict, tolerance=1e-2):
"""
:param label: label series
:param predict: predict series
:param tolerance: the maximum absolute error between corresponding elements of label and predict for a
prediction to be counted as correct. For classification problems this value should be 0.
:return:
"""
if tolerance == 0:
return (100.0 * np.sum(label == predict)) / predict.shape[0]
else:
correct_idx = (np.abs(label - predict) < tolerance)
return (100.0 * np.sum(correct_idx)) / predict.shape[0]
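# Example: with the default tolerance 1e-2, a label of 1.00 and a prediction of
# 1.005 are counted as correct.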
# Plotter Function
def gen_summary(self):
| if len(self.train_losses) == 0:
raise ValueError("The model session has not been run!")
plt.subplot(121)
plt.plot(self.train_losses)
plt.ylabel("Loss")
plt.xlabel('Number of batch iterations')
plt.title("Loss vs iterations")
plt.subplot(122)
plt.plot(self.predict, label='Predictions')
plt.plot(self.test_label, label='Test Labels')
plt.title("Test label vs Prediction")
plt.legend() | identifier_body |
|
rnn.py | ['input_dimension']
self.graph = None
self.loss = None
self.optimizer = None
self.predict = None
self.tf_labels = None
self.tf_dataset = None
self.learning_rate = None
# Two lists to store the losses and accuracies during training and testing
self.train_losses = []
self.train_accuracies = []
def create_graph(self):
"""
Set up a computation graph for TensorFlow
:return: None
"""
self.graph = tf.Graph()
model_type = self.options['model_type']
optimiser_selected = self.options['optimizer']
with self.graph.as_default():
self.tf_dataset = tf.placeholder(tf.float32,
shape=(None, self.options['num_steps'], self.input_dimensions))
self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))
self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')
# Forward pass
if model_type == 'rnn':
self.predict = self.rnn_model(self.tf_dataset)
elif model_type == 'lstm':
self.predict = self.lstm_model(self.tf_dataset)
else:
raise NotImplementedError("Unimplemented RNN model keyword")
self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))
if self.options['regularisation_coeff'] > 0.:
# Add in L2 penalty for regularisation if required
penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)
for var in tf.trainable_variables())
self.loss += penalty
if self.options['use_customised_optimizer'] is False:
if optimiser_selected == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif optimiser_selected == 'grad':
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
elif optimiser_selected == 'ada':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif optimiser_selected == 'rms':
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
else:
raise NotImplementedError("Unimplemented built-in optimiser keyword.")
else:
self.optimizer = self.options['customized_optimizer']
self.minimise = self.optimizer.minimize(self.loss)
def run(self):
"""
Create a session according to the computation graph and run the model
:return: None
"""
if self.graph is None:
raise ValueError("Create TensorFlow graph before running a session.")
with tf.Session(graph=self.graph) as session:
tf.global_variables_initializer().run()
# Stochastic gradient descent: train the data with a mini-batch each iteration
batch_size = self.options['batch_size']
for epoch_idx in range(self.options['num_epoch']):
training_epoch = self.training_data[epoch_idx]
label_epoch = self.training_label[epoch_idx]
batch_count = training_epoch.shape[0] // batch_size
learning_rate = self.options['learning_rate']
if self.options['learning_rate_decay_coeff'] > 0.:
learning_rate *= self.options['learning_rate_decay_coeff'] ** \
max(float(epoch_idx + 1 - self.options['init_epoch']), 0.0)
for batch in range(batch_count):
try:
batch_data = training_epoch[batch*batch_size:(batch+1)*batch_size, :, :]
batch_labels = label_epoch[batch*batch_size:(batch+1)*batch_size, :]
except KeyError:
batch_data = training_epoch[batch*batch_size:, :, :]
batch_labels = label_epoch[batch*batch_size:, :]
feed_dict = {
self.tf_dataset: batch_data,
self.tf_labels: batch_labels,
self.learning_rate: learning_rate}
p, l, _, = session.run([self.predict, self.loss, self.minimise], feed_dict=feed_dict)
self.train_losses.append(l)
self.train_accuracies.append(self.get_accuracy(batch_labels, p))
# Finally run the data on test data
final_feed_dict = {
self.tf_dataset: self.test_data,
self.tf_labels: self.test_label,
self.learning_rate: 0.,
}
self.predict, final_loss = session.run([self.predict, self.loss], feed_dict=final_feed_dict)
return self.predict
# Implementation of RNN and LSTM models
def rnn_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicRNNCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicRNNCell(num_cells,)
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
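# dynamic_rnn returns (batch, time, cells); transpose to (time, batch, cells) so
# outputs[-1] is the final time step for every sequence in the batch.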
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
def lstm_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicLSTMCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicLSTMCell(num_cells, )
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
# Utility Functions
@staticmethod
def unpack_options(num_cells=24,
learning_rate=1e-3,
learning_rate_decay_coeff=0.,
init_epoch=5,
batch_size=100,
optimizer='rms',
model_type='rnn',
use_customized_optimizer=False,
customized_optimizer=None,
num_layers=1,
regularisation_coeff=0.,
input_dimension=None,
num_steps=30,
num_epoch=1,):
"""
:param num_cells: Number of hidden units per layer in the RNN/LSTM network
:param learning_rate: initial learning rate
:param learning_rate_decay_coeff: the exponentially decaying coefficient of learning rate for each epoch.
:param init_epoch: initial number of epochs where the learning rate will be kept constant. Only relevant if
learning_rate_decay_coeff is a number other than zero.
:param batch_size: batch size
:param optimizer: choice of the chosen optimiser ('rms', 'adam', etc)
:param model_type: 'rnn' or 'lstm'
:param use_customized_optimizer: bool - if True the optimizer object in customized_optimizer
will be used instead.
:param customized_optimizer: optimizer object - if use_customized_optimizer is True, this optimizer will be used
:param num_layers: number of layers of hidden units in the RNN/LSTM
:param regularisation_coeff: regularisation coefficient (a.k.a lambda)
:param input_dimension: input dimension of each data point. For a scalar time series this value is 1
:param num_steps: number of data points of each input sequence
:param num_epoch: number of training epochs
:return:
"""
options = {
'num_cells': num_cells,
'learning_rate': learning_rate,
'learning_rate_decay_coeff': learning_rate_decay_coeff,
'init_epoch': init_epoch,
'batch_size': batch_size,
'optimizer': optimizer,
'model_type': model_type,
'num_layer': num_layers,
'use_customised_optimizer': use_customized_optimizer,
'customized_optimizer': customized_optimizer,
'regularisation_coeff': regularisation_coeff,
"input_dimension": input_dimension,
'num_steps': num_steps,
'num_epoch': num_epoch
}
return options
@staticmethod
def get_accuracy(label, predict, tolerance=1e-2):
"""
:param label: label series
:param predict: predict series
:param tolerance: the maximum absolute error between corresponding elements of label and predict for a
prediction to be counted as correct. For classification problems this value should be 0.
:return:
"""
if tolerance == 0:
return (100.0 * np.sum(label == predict)) / predict.shape[0]
else:
correct_idx = (np.abs(label - predict) < tolerance)
return (100.0 * np.sum(correct_idx)) / predict.shape[0]
# Plotter Function
def gen_summary(self):
if len(self.train_losses) == 0:
raise ValueError("The model session has not been run!")
plt.subplot(121)
plt.plot(self.train_losses)
plt.ylabel("Loss")
plt.xlabel('Number of batch iterations')
plt.title("Loss vs iterations") |
plt.subplot(122) | random_line_split |
|
rnn.py | set and ground truth label tensors
:param options: (hyper)parameters of the neural network model. See method unpack_options for details on the
full list of configurable options
"""
self.training_data = np.array(training_data, dtype=np.float32)
self.training_label = np.array(training_label, dtype=np.float32)
self.test_data = np.array(test_data, dtype=np.float32)
self.test_label = np.array(test_label, dtype=np.float32)
# Sanity checks
if self.training_data.shape[0] != self.training_label.shape[0]:
raise ValueError("The length of training_data tensor does not match the training_label tensor!")
if self.test_label.shape[0] != self.test_data.shape[0]:
raise ValueError("The length of test_data tensor does not match the test_label tensor!")
self.options = self.unpack_options(**options)
if self.options['input_dimension'] is None:
# Data dimension of a single sample
self.input_dimensions = 1
else:
self.input_dimensions = self.options['input_dimension']
self.graph = None
self.loss = None
self.optimizer = None
self.predict = None
self.tf_labels = None
self.tf_dataset = None
self.learning_rate = None
# Two lists to store the losses and accuracies during training and testing
self.train_losses = []
self.train_accuracies = []
def create_graph(self):
"""
Set up a computation graph for TensorFlow
:return: None
"""
self.graph = tf.Graph()
model_type = self.options['model_type']
optimiser_selected = self.options['optimizer']
with self.graph.as_default():
self.tf_dataset = tf.placeholder(tf.float32,
shape=(None, self.options['num_steps'], self.input_dimensions))
self.tf_labels = tf.placeholder(tf.float32, shape=(None, self.input_dimensions))
self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')
# Forward pass
if model_type == 'rnn':
self.predict = self.rnn_model(self.tf_dataset)
elif model_type == 'lstm':
self.predict = self.lstm_model(self.tf_dataset)
else:
raise NotImplementedError("Unimplemented RNN model keyword")
self.loss = tf.reduce_mean(tf.square(self.predict - self.tf_labels))
if self.options['regularisation_coeff'] > 0.:
# Add in L2 penalty for regularisation if required
|
if self.options['use_customised_optimizer'] is False:
if optimiser_selected == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif optimiser_selected == 'grad':
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
elif optimiser_selected == 'ada':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif optimiser_selected == 'rms':
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
else:
raise NotImplementedError("Unimplemented built-in optimiser keyword.")
else:
self.optimizer = self.options['customized_optimizer']
self.minimise = self.optimizer.minimize(self.loss)
def run(self):
"""
Create a session according to the computation graph and run the model
:return: None
"""
if self.graph is None:
raise ValueError("Create TensorFlow graph before running a session.")
with tf.Session(graph=self.graph) as session:
tf.global_variables_initializer().run()
# Stochastic gradient descent: train the data with a mini-batch each iteration
batch_size = self.options['batch_size']
for epoch_idx in range(self.options['num_epoch']):
training_epoch = self.training_data[epoch_idx]
label_epoch = self.training_label[epoch_idx]
batch_count = training_epoch.shape[0] // batch_size
learning_rate = self.options['learning_rate']
if self.options['learning_rate_decay_coeff'] > 0.:
learning_rate *= self.options['learning_rate_decay_coeff'] ** \
max(float(epoch_idx + 1 - self.options['init_epoch']), 0.0)
for batch in range(batch_count):
try:
batch_data = training_epoch[batch*batch_size:(batch+1)*batch_size, :, :]
batch_labels = label_epoch[batch*batch_size:(batch+1)*batch_size, :]
except KeyError:
batch_data = training_epoch[batch*batch_size:, :, :]
batch_labels = label_epoch[batch*batch_size:, :]
feed_dict = {
self.tf_dataset: batch_data,
self.tf_labels: batch_labels,
self.learning_rate: learning_rate}
p, l, _, = session.run([self.predict, self.loss, self.minimise], feed_dict=feed_dict)
self.train_losses.append(l)
self.train_accuracies.append(self.get_accuracy(batch_labels, p))
# Finally run the data on test data
final_feed_dict = {
self.tf_dataset: self.test_data,
self.tf_labels: self.test_label,
self.learning_rate: 0.,
}
self.predict, final_loss = session.run([self.predict, self.loss], feed_dict=final_feed_dict)
return self.predict
# Implementation of RNN and LSTM models
def rnn_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicRNNCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicRNNCell(num_cells,)
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
def lstm_model(self, training_data):
num_layer = self.options['num_layer']
num_cells = self.options['num_cells']
if num_layer == 1:
all_cells = tf.nn.rnn_cell.BasicLSTMCell(num_cells)
else:
cells = []
for i in range(num_layer):
cell = tf.nn.rnn_cell.BasicLSTMCell(num_cells, )
cells.append(cell)
all_cells = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
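# Stacked LSTM: MultiRNNCell chains the layers, each keeping its own (c, h) state.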
outputs, state = tf.nn.dynamic_rnn(all_cells, training_data, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
output = outputs[-1]
W = tf.Variable(tf.truncated_normal([num_cells, self.input_dimensions]))
b = tf.Variable(tf.random_normal([self.input_dimensions]))
logit = tf.matmul(output, W) + b
return logit
# Utility Functions
@staticmethod
def unpack_options(num_cells=24,
learning_rate=1e-3,
learning_rate_decay_coeff=0.,
init_epoch=5,
batch_size=100,
optimizer='rms',
model_type='rnn',
use_customized_optimizer=False,
customized_optimizer=None,
num_layers=1,
regularisation_coeff=0.,
input_dimension=None,
num_steps=30,
num_epoch=1,):
"""
:param num_cells: Number of hidden units per layer in the RNN/LSTM network
:param learning_rate: initial learning rate
:param learning_rate_decay_coeff: the exponentially decaying coefficient of learning rate for each epoch.
:param init_epoch: initial number of epochs where the learning rate will be kept constant. Only relevant if
learning_rate_decay_coeff is a number other than zero.
:param batch_size: batch size
:param optimizer: choice of the chosen optimiser ('rms', 'adam', etc)
:param model_type: 'rnn' or 'lstm'
:param use_customized_optimizer: bool - if True the optimizer object in customized_optimizer
will be used instead.
:param customized_optimizer: optimizer object - if use_customized_optimizer is True, this optimizer will be used
:param num_layers: number of layers of hidden units in the RNN/LSTM
:param regularisation_coeff: regularisation coefficient (a.k.a lambda)
:param input_dimension: input dimension of each data point. For a scalar time series this value is 1
:param num_steps: number of data points of each input sequence
:param num_epoch: number of training epochs
:return:
"""
options = {
'num_cells': num_cells,
'learning_rate': learning_rate,
'learning_rate_decay_coeff': learning_rate_decay_coeff,
'init_epoch': init_epoch,
'batch_size': batch_size,
'optimizer': optimizer,
'model_type': model_type,
'num_layer': num_layers,
'use_customised_optimizer': use_customized_optimizer,
'customized_optimizer': customized_optimizer,
'regularisation_coeff': regularisation_coeff,
"input_dimension": input_dimension,
'num_steps': num_steps | penalty = self.options['regularisation_coeff'] * sum(tf.nn.l2_loss(var)
for var in tf.trainable_variables())
self.loss += penalty | conditional_block |
main.go | output: %v", err)
}
procs := make(map[int]*Proc)
done := make(chan error)
go func() {
tids := make(map[uint64]uint64)
stacks := make(map[uint64]*Stack)
locs := make(map[uint64]*profile.Location)
funcs := make(map[string]*profile.Function)
s := bufio.NewScanner(perfOut)
getProc := func(pid int) *Proc {
p := procs[pid]
if p == nil {
p = &Proc{
pid: pid,
load: make(map[int]int),
samples: make(map[uint64]*Sample),
}
procs[pid] = p
}
return p
}
for s.Scan() {
ln := s.Text()
if ln == "" || ln[0] == '#' {
continue
}
if strings.Contains(ln, " sched:sched_switch:") {
/* The format is:
0/0 [006] sched:sched_switch: prev_comm=swapper/6 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=rcuos/2 next_pid=11 next_prio=120
ffffffff817297f0 __schedule
ffffffff8172a109 schedule_preempt_disabled
ffffffff810bf66e cpu_startup_entry
ffffffff8104160d start_secondary
*/
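// Parse the leading "pid/tid [cpu]" header: skip to the first digit, read the
// pid up to '/', then the tid.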
i := 0
for ; ln[i] < '0' || ln[i] > '9'; i++ {
}
pidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
pid, err := strconv.ParseUint(ln[pidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 1: %v\n", ln)
continue
}
if ln[i] != '/' {
fmt.Fprintf(os.Stderr, "failed to parse pid 2: %v\n", ln)
continue
}
i++
tidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
tid, err := strconv.ParseUint(ln[tidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 3: %v\n", ln)
continue
}
tids[tid] = pid
pos := strings.Index(ln, " prev_pid=")
if pos == -1 {
fmt.Fprintf(os.Stderr, "failed to parse pid 4: %v\n", ln)
continue
}
pos += len(" prev_pid=")
i = pos
for ; ln[i] != ' '; i++ {
}
ptid, err := strconv.ParseUint(ln[pos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 5: %v\n", ln)
continue
}
ppid := tids[ptid]
if ppid == 0 {
ppid = ptid
}
pos = strings.Index(ln, " next_pid=")
if pos == -1 {
fmt.Fprintf(os.Stderr, "failed to parse pid 6: v\n", ln)
continue
}
pos += len(" next_pid=")
i = pos
for ; ln[i] != ' '; i++ {
}
ntid, err := strconv.ParseUint(ln[pos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 7: v\n", ln)
continue
}
npid := tids[ntid]
if npid == 0 {
npid = ntid
}
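// A switch-out decrements the previous process's on-CPU thread count and a
// switch-in increments the next one's; run > 1 marks the process multithreaded.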
p := getProc(int(ppid))
if p.run > 0 {
p.run--
}
p = getProc(int(npid))
p.run++
if p.run > 1 {
p.multithreaded = true
}
} else if strings.Contains(ln, " cycles:") {
/* The format is:
0/0 [006] cycles:
ffffffff8104f45a native_write_msr_safe
ffffffff8102fa4c intel_pmu_enable_all
ffffffff81029ca4 x86_pmu_enable
ffffffff81143487 perf_pmu_enable
ffffffff81027d8a x86_pmu_commit_txn
ffffffff81143f00 group_sched_in
ffffffff811443c2 __perf_event_enable
ffffffff81140000 remote_function
ffffffff810dcf60 generic_smp_call_function_single_interrupt
ffffffff81040cd7 smp_call_function_single_interrupt
ffffffff8173759d call_function_single_interrupt
ffffffff815d6c59 cpuidle_idle_call
ffffffff8101d3ee arch_cpu_idle
ffffffff810bf4f5 cpu_startup_entry
ffffffff8104160d start_secondary | for ; ln[i] < '0' || ln[i] > '9'; i++ {
}
pidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
pid, err := strconv.ParseUint(ln[pidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 8: %v '%v'\n", ln, ln[pidPos:i])
continue
}
if ln[i] != '/' {
fmt.Fprintf(os.Stderr, "failed to parse pid 9: %v\n", ln)
continue
}
i++
tidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
tid, err := strconv.ParseUint(ln[tidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 10: %v\n", ln)
continue
}
tids[tid] = pid
if *flagPid != 0 && uint64(*flagPid) != pid {
continue
}
p := getProc(int(pid))
if !*flagInit && !p.multithreaded {
continue
}
run := p.run
if run == 0 {
run = 1 // somehow it happens
}
p.load[run]++
frames := parseStack(s)
frames = append(frames, &Frame{uint64(run), fmt.Sprintf("LOAD %v", run)})
stkHash := hashStack(frames)
stack := stacks[stkHash]
if stack == nil {
stack = &Stack{
frames: make([]*profile.Location, len(frames)),
}
for i, f := range frames {
loc := locs[f.pc]
if loc == nil {
fn := funcs[f.fn]
if fn == nil {
fname := string(append([]byte{}, f.fn...))
fn = &profile.Function{
ID: uint64(len(funcs) + 1),
Name: fname,
SystemName: fname,
}
funcs[fname] = fn
}
loc = &profile.Location{
ID: uint64(len(locs) + 1),
Address: f.pc,
Line: []profile.Line{
profile.Line{
Function: fn,
Line: 1,
},
},
}
locs[f.pc] = loc
}
stack.frames[i] = loc
}
stacks[stkHash] = stack
}
sample := p.samples[stkHash]
if sample == nil {
sample = &Sample{
run: run,
stack: stack,
}
p.samples[stkHash] = sample
}
if sample.run != run {
fmt.Fprintf(os.Stderr, "misaccounted sample: %v -> %v\n", run, sample.run)
}
sample.n++
p.n++
}
}
done <- s.Err()
}()
if err := perf.Start(); err != nil {
failf("failed to start perf: %v", err)
}
errOutput, _ := ioutil.ReadAll(perfOutErr)
if err := perf.Wait(); err != nil {
if false {
failf("perf failed: %v\n%s", err, errOutput)
}
}
if err := <-done; err != nil {
failf("failed to parse perf output: %v", err)
| */
i := 0 | random_line_split |
main.go | 1 {
fmt.Fprintf(os.Stderr, "failed to parse pid 6: v\n", ln)
continue
}
pos += len(" next_pid=")
i = pos
for ; ln[i] != ' '; i++ {
}
ntid, err := strconv.ParseUint(ln[pos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 7: v\n", ln)
continue
}
npid := tids[ntid]
if npid == 0 {
npid = ntid
}
p := getProc(int(ppid))
if p.run > 0 {
p.run--
}
p = getProc(int(npid))
p.run++
if p.run > 1 {
p.multithreaded = true
}
} else if strings.Contains(ln, " cycles:") {
/* The format is:
0/0 [006] cycles:
ffffffff8104f45a native_write_msr_safe
ffffffff8102fa4c intel_pmu_enable_all
ffffffff81029ca4 x86_pmu_enable
ffffffff81143487 perf_pmu_enable
ffffffff81027d8a x86_pmu_commit_txn
ffffffff81143f00 group_sched_in
ffffffff811443c2 __perf_event_enable
ffffffff81140000 remote_function
ffffffff810dcf60 generic_smp_call_function_single_interrupt
ffffffff81040cd7 smp_call_function_single_interrupt
ffffffff8173759d call_function_single_interrupt
ffffffff815d6c59 cpuidle_idle_call
ffffffff8101d3ee arch_cpu_idle
ffffffff810bf4f5 cpu_startup_entry
ffffffff8104160d start_secondary
*/
i := 0
for ; ln[i] < '0' || ln[i] > '9'; i++ {
}
pidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
pid, err := strconv.ParseUint(ln[pidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 8: %v '%v'\n", ln, ln[pidPos:i])
continue
}
if ln[i] != '/' {
fmt.Fprintf(os.Stderr, "failed to parse pid 9: %v\n", ln)
continue
}
i++
tidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
tid, err := strconv.ParseUint(ln[tidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 10: %v\n", ln)
continue
}
tids[tid] = pid
if *flagPid != 0 && uint64(*flagPid) != pid {
continue
}
p := getProc(int(pid))
if !*flagInit && !p.multithreaded {
continue
}
run := p.run
if run == 0 {
run = 1 // somehow it happens
}
p.load[run]++
frames := parseStack(s)
frames = append(frames, &Frame{uint64(run), fmt.Sprintf("LOAD %v", run)})
stkHash := hashStack(frames)
stack := stacks[stkHash]
if stack == nil {
stack = &Stack{
frames: make([]*profile.Location, len(frames)),
}
for i, f := range frames {
loc := locs[f.pc]
if loc == nil {
fn := funcs[f.fn]
if fn == nil {
fname := string(append([]byte{}, f.fn...))
fn = &profile.Function{
ID: uint64(len(funcs) + 1),
Name: fname,
SystemName: fname,
}
funcs[fname] = fn
}
loc = &profile.Location{
ID: uint64(len(locs) + 1),
Address: f.pc,
Line: []profile.Line{
profile.Line{
Function: fn,
Line: 1,
},
},
}
locs[f.pc] = loc
}
stack.frames[i] = loc
}
stacks[stkHash] = stack
}
sample := p.samples[stkHash]
if sample == nil {
sample = &Sample{
run: run,
stack: stack,
}
p.samples[stkHash] = sample
}
if sample.run != run {
fmt.Fprintf(os.Stderr, "misaccounted sample: %v -> %v\n", run, sample.run)
}
sample.n++
p.n++
}
}
done <- s.Err()
}()
if err := perf.Start(); err != nil {
failf("failed to start perf: %v", err)
}
errOutput, _ := ioutil.ReadAll(perfOutErr)
if err := perf.Wait(); err != nil {
if false {
failf("perf failed: %v\n%s", err, errOutput)
}
}
if err := <-done; err != nil {
failf("failed to parse perf output: %v", err)
}
var proc *Proc
max := 0
for _, p := range procs {
if max < p.n {
max = p.n
proc = p
}
}
maxRun := 0
for run := range proc.load {
if maxRun < run {
maxRun = run
}
}
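// Realtime mode rescales every sample by maxRun/run, estimating the profile as
// if the process always ran at its peak observed parallelism.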
if *flagRealtime {
proc.n = 0
proc.load = make(map[int]int)
for _, s := range proc.samples {
s.n = int(float64(s.n) * float64(maxRun) / float64(s.run))
if s.n < 0 {
println("underflow:", s.n, maxRun, s.run, int(float64(s.n)*float64(maxRun)/float64(s.run)))
}
if proc.n > proc.n+s.n {
println("overflow:", proc.n, s.n, s.run)
}
proc.n += s.n
proc.load[s.run] += s.n
}
}
maxN := 0
total := 0
totalLoad := 0
load := make([]int, maxRun+1)
for run, n := range proc.load {
load[run] = n
total += n
totalLoad += run * n
if maxN < n {
maxN = n
}
}
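// Print the load histogram: per parallelism level, its share of samples and a
// bar normalised to the most frequent level.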
fmt.Printf("pid=%v samples=%v avgload=%.1f\n", proc.pid, proc.n, float64(totalLoad)/float64(total))
for run, n := range load {
if run == 0 {
continue
}
fmt.Printf("%2v [%5.2f%%]: %v\n", run, float64(n)/float64(total)*100, strings.Repeat("*", int(float64(n)/float64(maxN)*100+0.5)))
}
p := &profile.Profile{
Period: 250000,
PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"},
SampleType: []*profile.ValueType{
{Type: "samples", Unit: "count"},
{Type: "cpu", Unit: "nanoseconds"},
},
}
locs := make(map[uint64]bool)
funcs := make(map[uint64]bool)
for _, s := range proc.samples {
if *flagCpu > 0 && *flagCpu != s.run {
continue
}
p.Sample = append(p.Sample, &profile.Sample{
Value: []int64{int64(s.n), int64(s.n) * p.Period},
Location: s.stack.frames,
})
for _, loc := range s.stack.frames {
if !locs[loc.ID] {
locs[loc.ID] = true
p.Location = append(p.Location, loc)
}
for _, line := range loc.Line {
if !funcs[line.Function.ID] {
funcs[line.Function.ID] = true
p.Function = append(p.Function, line.Function)
}
}
}
}
buff := bufio.NewWriter(f)
p.Write(buff)
buff.Flush()
f.Close()
exec.Command("go", "tool", "pprof", "-web", "-nodefraction=0.001", "-edgefraction=0.001", f.Name()).Run()
}
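// Editor's note (hedged): `go tool pprof -web` renders the profile just
// written to f as a call graph in the browser; the small -nodefraction and
// -edgefraction values are presumably there to keep rare LOAD-annotated
// frames from being pruned out of the graph.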
func | parseStack | identifier_name |
|
main.go | : %v", err)
}
procs := make(map[int]*Proc)
done := make(chan error)
go func() {
tids := make(map[uint64]uint64)
stacks := make(map[uint64]*Stack)
locs := make(map[uint64]*profile.Location)
funcs := make(map[string]*profile.Function)
s := bufio.NewScanner(perfOut)
getProc := func(pid int) *Proc {
p := procs[pid]
if p == nil {
p = &Proc{
pid: pid,
load: make(map[int]int),
samples: make(map[uint64]*Sample),
}
procs[pid] = p
}
return p
}
for s.Scan() {
ln := s.Text()
if ln == "" || ln[0] == '#' {
continue
}
if strings.Contains(ln, " sched:sched_switch:") {
/* The format is:
0/0 [006] sched:sched_switch: prev_comm=swapper/6 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=rcuos/2 next_pid=11 next_prio=120
ffffffff817297f0 __schedule
ffffffff8172a109 schedule_preempt_disabled
ffffffff810bf66e cpu_startup_entry
ffffffff8104160d start_secondary
*/
i := 0
for ; ln[i] < '0' || ln[i] > '9'; i++ {
}
pidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
pid, err := strconv.ParseUint(ln[pidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 1: %v\n", ln)
continue
}
if ln[i] != '/' {
fmt.Fprintf(os.Stderr, "failed to parse pid 2: %v\n", ln)
continue
}
i++
tidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
tid, err := strconv.ParseUint(ln[tidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 3: %v\n", ln)
continue
}
tids[tid] = pid
pos := strings.Index(ln, " prev_pid=")
if pos == -1 {
fmt.Fprintf(os.Stderr, "failed to parse pid 4: %v\n", ln)
continue
}
pos += len(" prev_pid=")
i = pos
for ; ln[i] != ' '; i++ {
}
ptid, err := strconv.ParseUint(ln[pos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 5: %v\n", ln)
continue
}
ppid := tids[ptid]
if ppid == 0 {
ppid = ptid
}
pos = strings.Index(ln, " next_pid=")
if pos == -1 {
fmt.Fprintf(os.Stderr, "failed to parse pid 6: v\n", ln)
continue
}
pos += len(" next_pid=")
i = pos
for ; ln[i] != ' '; i++ {
}
ntid, err := strconv.ParseUint(ln[pos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 7: v\n", ln)
continue
}
npid := tids[ntid]
if npid == 0 {
npid = ntid
}
p := getProc(int(ppid))
if p.run > 0 {
p.run--
}
p = getProc(int(npid))
p.run++
if p.run > 1 {
p.multithreaded = true
}
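// Editor's gloss (not original): p.run approximates how many threads of the
// process are on-CPU at this instant; each later cycles: sample is attributed
// to that concurrency level via the synthetic LOAD frame appended below.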
} else if strings.Contains(ln, " cycles:") {
/* The format is:
0/0 [006] cycles:
ffffffff8104f45a native_write_msr_safe
ffffffff8102fa4c intel_pmu_enable_all
ffffffff81029ca4 x86_pmu_enable
ffffffff81143487 perf_pmu_enable
ffffffff81027d8a x86_pmu_commit_txn
ffffffff81143f00 group_sched_in
ffffffff811443c2 __perf_event_enable
ffffffff81140000 remote_function
ffffffff810dcf60 generic_smp_call_function_single_interrupt
ffffffff81040cd7 smp_call_function_single_interrupt
ffffffff8173759d call_function_single_interrupt
ffffffff815d6c59 cpuidle_idle_call
ffffffff8101d3ee arch_cpu_idle
ffffffff810bf4f5 cpu_startup_entry
ffffffff8104160d start_secondary
*/
i := 0
for ; ln[i] < '0' || ln[i] > '9'; i++ {
}
pidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
pid, err := strconv.ParseUint(ln[pidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 8: %v '%v'\n", ln, ln[pidPos:i])
continue
}
if ln[i] != '/' {
fmt.Fprintf(os.Stderr, "failed to parse pid 9: %v\n", ln)
continue
}
i++
tidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
tid, err := strconv.ParseUint(ln[tidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 10: %v\n", ln)
continue
}
tids[tid] = pid
if *flagPid != 0 && uint64(*flagPid) != pid {
continue
}
p := getProc(int(pid))
if !*flagInit && !p.multithreaded {
continue
}
run := p.run
if run == 0 {
run = 1 // somehow it happens
}
p.load[run]++
frames := parseStack(s)
frames = append(frames, &Frame{uint64(run), fmt.Sprintf("LOAD %v", run)})
stkHash := hashStack(frames)
stack := stacks[stkHash]
if stack == nil {
stack = &Stack{
frames: make([]*profile.Location, len(frames)),
}
for i, f := range frames {
loc := locs[f.pc]
if loc == nil {
fn := funcs[f.fn]
if fn == nil {
fname := string(append([]byte{}, f.fn...))
fn = &profile.Function{
ID: uint64(len(funcs) + 1),
Name: fname,
SystemName: fname,
}
funcs[fname] = fn
}
loc = &profile.Location{
ID: uint64(len(locs) + 1),
Address: f.pc,
Line: []profile.Line{
profile.Line{
Function: fn,
Line: 1,
},
},
}
locs[f.pc] = loc
}
stack.frames[i] = loc
}
stacks[stkHash] = stack
}
sample := p.samples[stkHash]
if sample == nil {
sample = &Sample{
run: run,
stack: stack,
}
p.samples[stkHash] = sample
}
if sample.run != run |
sample.n++
p.n++
}
}
done <- s.Err()
}()
if err := perf.Start(); err != nil {
failf("failed to start perf: %v", err)
}
errOutput, _ := ioutil.ReadAll(perfOutErr)
if err := perf.Wait(); err != nil {
if false {
failf("perf failed: %v\n%s", err, errOutput)
}
}
if err := <-done; err != nil {
failf("failed to parse perf output: %v", | {
fmt.Fprintf(os.Stderr, "misaccounted sample: %v -> %v\n", run, sample.run)
} | conditional_block |
main.go | intel_pmu_enable_all
ffffffff81029ca4 x86_pmu_enable
ffffffff81143487 perf_pmu_enable
ffffffff81027d8a x86_pmu_commit_txn
ffffffff81143f00 group_sched_in
ffffffff811443c2 __perf_event_enable
ffffffff81140000 remote_function
ffffffff810dcf60 generic_smp_call_function_single_interrupt
ffffffff81040cd7 smp_call_function_single_interrupt
ffffffff8173759d call_function_single_interrupt
ffffffff815d6c59 cpuidle_idle_call
ffffffff8101d3ee arch_cpu_idle
ffffffff810bf4f5 cpu_startup_entry
ffffffff8104160d start_secondary
*/
i := 0
for ; ln[i] < '0' || ln[i] > '9'; i++ {
}
pidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
pid, err := strconv.ParseUint(ln[pidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 8: %v '%v'\n", ln, ln[pidPos:i])
continue
}
if ln[i] != '/' {
fmt.Fprintf(os.Stderr, "failed to parse pid 9: %v\n", ln)
continue
}
i++
tidPos := i
for ; ln[i] >= '0' && ln[i] <= '9'; i++ {
}
tid, err := strconv.ParseUint(ln[tidPos:i], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to parse pid 10: %v\n", ln)
continue
}
tids[tid] = pid
if *flagPid != 0 && uint64(*flagPid) != pid {
continue
}
p := getProc(int(pid))
if !*flagInit && !p.multithreaded {
continue
}
run := p.run
if run == 0 {
run = 1 // somehow it happens
}
p.load[run]++
frames := parseStack(s)
frames = append(frames, &Frame{uint64(run), fmt.Sprintf("LOAD %v", run)})
stkHash := hashStack(frames)
stack := stacks[stkHash]
if stack == nil {
stack = &Stack{
frames: make([]*profile.Location, len(frames)),
}
for i, f := range frames {
loc := locs[f.pc]
if loc == nil {
fn := funcs[f.fn]
if fn == nil {
fname := string(append([]byte{}, f.fn...))
fn = &profile.Function{
ID: uint64(len(funcs) + 1),
Name: fname,
SystemName: fname,
}
funcs[fname] = fn
}
loc = &profile.Location{
ID: uint64(len(locs) + 1),
Address: f.pc,
Line: []profile.Line{
profile.Line{
Function: fn,
Line: 1,
},
},
}
locs[f.pc] = loc
}
stack.frames[i] = loc
}
stacks[stkHash] = stack
}
sample := p.samples[stkHash]
if sample == nil {
sample = &Sample{
run: run,
stack: stack,
}
p.samples[stkHash] = sample
}
if sample.run != run {
fmt.Fprintf(os.Stderr, "misaccounted sample: %v -> %v\n", run, sample.run)
}
sample.n++
p.n++
}
}
done <- s.Err()
}()
if err := perf.Start(); err != nil {
failf("failed to start perf: %v", err)
}
errOutput, _ := ioutil.ReadAll(perfOutErr)
if err := perf.Wait(); err != nil {
if false {
failf("perf failed: %v\n%s", err, errOutput)
}
}
if err := <-done; err != nil {
failf("failed to parse perf output: %v", err)
}
var proc *Proc
max := 0
for _, p := range procs {
if max < p.n {
max = p.n
proc = p
}
}
maxRun := 0
for run := range proc.load {
if maxRun < run {
maxRun = run
}
}
if *flagRealtime {
proc.n = 0
proc.load = make(map[int]int)
for _, s := range proc.samples {
s.n = int(float64(s.n) * float64(maxRun) / float64(s.run))
if s.n < 0 {
println("underflow:", s.n, maxRun, s.run, int(float64(s.n)*float64(maxRun)/float64(s.run)))
}
if proc.n > proc.n+s.n {
println("overflow:", proc.n, s.n, s.run)
}
proc.n += s.n
proc.load[s.run] += s.n
}
}
maxN := 0
total := 0
totalLoad := 0
load := make([]int, maxRun+1)
for run, n := range proc.load {
load[run] = n
total += n
totalLoad += run * n
if maxN < n {
maxN = n
}
}
fmt.Printf("pid=%v samples=%v avgload=%.1f\n", proc.pid, proc.n, float64(totalLoad)/float64(total))
for run, n := range load {
if run == 0 {
continue
}
fmt.Printf("%2v [%5.2f%%]: %v\n", run, float64(n)/float64(total)*100, strings.Repeat("*", int(float64(n)/float64(maxN)*100+0.5)))
}
p := &profile.Profile{
Period: 250000,
PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"},
SampleType: []*profile.ValueType{
{Type: "samples", Unit: "count"},
{Type: "cpu", Unit: "nanoseconds"},
},
}
locs := make(map[uint64]bool)
funcs := make(map[uint64]bool)
for _, s := range proc.samples {
if *flagCpu > 0 && *flagCpu != s.run {
continue
}
p.Sample = append(p.Sample, &profile.Sample{
Value: []int64{int64(s.n), int64(s.n) * p.Period},
Location: s.stack.frames,
})
for _, loc := range s.stack.frames {
if !locs[loc.ID] {
locs[loc.ID] = true
p.Location = append(p.Location, loc)
}
for _, line := range loc.Line {
if !funcs[line.Function.ID] {
funcs[line.Function.ID] = true
p.Function = append(p.Function, line.Function)
}
}
}
}
buff := bufio.NewWriter(f)
p.Write(buff)
buff.Flush()
f.Close()
exec.Command("go", "tool", "pprof", "-web", "-nodefraction=0.001", "-edgefraction=0.001", f.Name()).Run()
}
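// Editor's note (sketch): each pprof sample's Value slice pairs the raw hit
// count with an estimated CPU time of count*Period nanoseconds, matching the
// two SampleType entries ({samples,count} and {cpu,nanoseconds}) declared on
// the Profile above.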
func parseStack(s *bufio.Scanner) []*Frame {
var frames []*Frame
for s.Scan() && s.Text() != "" {
ln := s.Text()
i := 0
for ; ln[i] == ' ' || ln[i] == '\t'; i++ {
}
pos := i
for ; ln[i] != ' ' && ln[i] != '\t'; i++ {
}
pc, err := strconv.ParseUint(ln[pos:i], 16, 64)
if err != nil {
break
}
fn := ln[i+1:]
frames = append(frames, &Frame{pc, fn})
}
return frames
}
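// Illustrative use of parseStack (editor's sketch; assumes the Frame type used
// above and an extra "strings" import):
//	s := bufio.NewScanner(strings.NewReader("\tffffffff817297f0 __schedule\n\n"))
//	frames := parseStack(s) // one frame: pc=0xffffffff817297f0, fn="__schedule"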
func hashStack(frames []*Frame) uint64 {
buf := new(bytes.Buffer)
for _, f := range frames {
binary.Write(buf, binary.LittleEndian, f.pc)
}
s := sha1.Sum(buf.Bytes())
r := bytes.NewReader(s[:])
var id uint64
binary.Read(r, binary.LittleEndian, &id)
return id
}
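// Editor's note: the stack ID is the first 8 bytes of a SHA-1 over the
// little-endian PC sequence, so identical PC chains hash identically by
// construction; a cheaper 64-bit digest such as hash/fnv would be an
// equivalent design choice here.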
func failf(what string, args ...interface{}) | {
fmt.Fprintf(os.Stderr, what+"\n", args...)
os.Exit(1)
} | identifier_body |
|
simulation2_01.py | 90480378
x = float(Xcoordinate)
y = float(Ycoordinate)
#Randomly select the origin point along the linear vent
rand_index = randrange(0,10)
xorigin, yorigin = (xpt[rand_index], ypt[rand_index])
distance = check_topography(dtm, xorigin, yorigin, x+xorigin, y+yorigin, distance,elevation, dev, gtinv)
if distance[1] == True:
x = (distance[0] * math.sin(azimuth * math.pi/180))
y = (distance[0] * math.cos(azimuth* math.pi/180))
#Convert back to degrees
x /= 100
x *= 0.003297790480378
y /= 100
y *= 0.003297790480378
else:
pass
xarr[i][index] = x+xorigin
yarr[i][index] = y+yorigin
def calc_height(distance, ejectionangle, g, ejectionvelocity):
'''
height(x) = initial_height + x*tan(theta) - (g*x^2) / (2*(v*cos(theta))^2)
initial_height = 0, a planar surface is fit to some reference elevation.
distance is in meters
angle is in radians
'''
trajectory = numpy.linspace(0,distance, distance/100,endpoint=True )
elevation = (trajectory * math.tan(ejectionangle)) - ((g*(trajectory**2)) / (2*((ejectionvelocity * math.cos(ejectionangle))**2)))
return elevation
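# Worked example (editor's check, not original code): with v = 400 m/s,
# theta = 45 deg and lunar g = 1.6249 m/s^2, the flat-plane range is
# v**2 * sin(2*theta) / g ~= 400**2 / 1.6249 ~= 98.5 km, and the last entry
# of calc_height(98467, math.pi/4, 1.6249, 400) is ~0, i.e. impact.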
def calc_distance():
g = 1.6249
#Calculate the ejection angle randomly from a range
ejectionangle = uniform(angle[0],angle[1])
ejectionangle *= math.pi/180 #Convert to radians
theta = math.sin(2*ejectionangle)
#Determine the ejection velocity randomly from a range
ejectionvelocity = uniform(velocity[0], velocity[1])
v2 = ejectionvelocity * ejectionvelocity
#Calculate total theoretical travel distance
distance = (v2 * theta) / g
#Calculate the elevation over a planar surface
elevation = calc_height(distance, ejectionangle, g, ejectionvelocity)
return distance, ejectionangle, elevation
def stromboli2():
'''distance = (velocity^2*(sin(2theta))) / gravity'''
p = 0
while p <= num:
p+=1
g = 1.6249 #Gravitational acceleration on the moon
distance, angle, elevation = calc_distance()
azimuth = random_azimuth()
Xcoordinate = distance * math.sin(azimuth * math.pi/180) #Conversion to radians
Ycoordinate = distance * math.cos(azimuth* math.pi/180)
#The WAC visible spectrum data is 100mpp or 0.003297790480378 degrees / pixel.
Xcoordinate /= 100
Xcoordinate *= 0.003297790480378
Ycoordinate /= 100
Ycoordinate *= 0.003297790480378
yield Xcoordinate, Ycoordinate, angle, azimuth, elevation, distance
if p > num:
done = False
yield done
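# Editor's check on the conversion used above: 100 m/pixel corresponds to
# 0.003297790480378 deg on the Moon, i.e. 1 deg ~= 30.32 km of arc, which is
# consistent with a mean lunar radius of ~1737 km (2*pi*1737/360 ~= 30.3 km).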
def check_topography(dtm, originx, originy, destx, desty, distance,elevation, dev, gtinv):
'''
This function checks for impact due to variation in topography by
mimicing the functionality of a topographic profile from polyline.
1. Generate 2 arrays. One of X coordinates and one of Y coordinates
2. Transform these from GCS to PCS
3. Create a new array with the elevations extracted from the dtm
4. Compare it to the analytical trajectory heights
5. If the impact occurs before total potential travel distance,
drop the projectile there. If not, place it at the total possible
travel distance.
Parameters
----------
dtm: A digital terrain model, in 16bit, storing terrain elevation, ndarray
originx: The x coord of the projectile launch, scalar
originy: The y coord of the projectile launch, scalar
destx: The x landing coordinate on a flat plane, scalar
desty: The y landing coordinate on a flat plane, scalar
distance: The total possible distance traveled, scalar
elevation: An array storing heights above 0 of the projectile at some
interval (100m by default)
dev: Geotransform parameters
gtinv: Inverse geotransform parameters
Returns
-------
distance: The new distance the projectile has traveled if it impacts
the topography.
ToDo:
I should grab an elevation line longer than the total possible distance. On a planar surface the object lands at the full distance; on upward-sloping terrain it lands early; on a downward slope it would land later, and we do not test for that case.
'''
#Extract the elevation from the dtm along the vector
#We add 5km to distance as total theoretical distance may be exceeded by
# downward sloping terrain
xpt = numpy.linspace(originx,destx,num=(distance)/100, endpoint=True)
ypt = numpy.linspace(originy,desty,num=(distance)/100, endpoint=True)
xpt -= geotransform[0]
ypt -= geotransform[3]
xsam = numpy.round_((gtinv[1] *xpt + gtinv[2] * ypt), decimals=0)
ylin = numpy.round_((gtinv[4] *xpt + gtinv[5] * ypt), decimals=0)
try:
dtmvector = dtm[ylin.astype(int),xsam.astype(int)]
#Compute elevation of projectile from a plane at the origin height
dtmvectormin = dtmvector.min()
elevation -= abs(dtmvector[0])
#Compare the projectile elevation to the dtm
dtmvector += abs(dtmvectormin)
elevation -= dtmvector
elevation += dtmvectormin
#Ignore the first 2.5km of ejection distance to ensure that we get a valid elevation check.
impact = numpy.where(elevation[250:] <= 0)
try:
#We are working at 100mpp, so the new distance is index +1
return ((impact[0][0])+1) * 100, True
except:
return distance, False
except:
print "Total travel distance exceeds model dimensions."
return distance, False
def density(m, xdata, ydata, shapefile, ppg):
'''
This function converts the lat/lon of the input map to meters
assuming an equirectangular projection. It then creates a grid at | If the shapefile flag is set to true a shapefile is created by calling
the shapefile function.
Parameters:
m: A basemap mapping object
xdata: An array of x landing coordinates, ndarray
ydata: An array of y landing coordinates, ndarray
shapefile: A flag on whether or not to generate a shapefile
ppg: The number of meters per grid cell * 100
'''
#Convert from DD to m to create a mesh grid.
xmax = (m.xmax) / 0.003297790480378
xmin = (m.xmin) / 0.003297790480378
ymax = (m.ymax) / 0.003297790480378
ymin = (m.ymin) / 0.003297790480378
#Base 100mpp
nx = 1516 / int(ppg)
ny = 2123 / int(ppg)
#Convert to numpy arrays
xdata = numpy.asarray(xdata)
ydata = numpy.asarray(ydata)
#Bin the data & calculate the density
lon_bins = numpy.linspace(xdata.min(), xdata.max(), nx+1)
lat_bins = numpy.linspace(ydata.min(), ydata.max(), ny+1)
density, _, _ = numpy.histogram2d(ydata, xdata, [lat_bins, lon_bins])
#If the user wants a shapefile, pass the numpy arrays
if shapefile != None:
print "Writing model output to a shapefile."
create_shapefile(xdata, ydata, shapefile)
#Create a grid of equally spaced polygons
lon_bins_2d, lat_bins_2d = numpy.meshgrid(lon_bins, lat_bins | 100mpp, bins the input data into the grid (density) and creates a
histogram. Finally, a mesh grid is created and the histogram is
plotted in 2D over the basemap.
| random_line_split |
simulation2_01.py |
4. Compare it to the analytical trajectory heights
5. If the impact occurs before total potential travel distance,
drop the projectile there. If not, place it at the total possible
travel distance.
Parameters
----------
dtm: A digital terrain model, in 16bit, storing terrain elevation, ndarray
originx: The x coord of the projectile launch, scalar
originy: The y coord of the projectile launch, scalar
destx: The x landing coordinate on a flat plane, scalar
desty: The y landing coordinate on a flat plane, scalar
distance: The total possible distance traveled, scalar
elevation: An array storing heights above 0 of the projectile at some
interval (100m by default)
dev: Geotransform parameters
gtinv: Inverse geotransform parameters
Returns
-------
distance: The new distance the projectile has traveled if it impacts
the topography.
ToDo:
I should grab an elevation line longer than the total possible distance. On a planar surface the object lands at the full distance; on upward-sloping terrain it lands early; on a downward slope it would land later, and we do not test for that case.
'''
#Extract the elevation from the dtm along the vector
#We add 5km to distance as total theoretical distance may be exceeded by
# downward sloping terrain
xpt = numpy.linspace(originx,destx,num=(distance)/100, endpoint=True)
ypt = numpy.linspace(originy,desty,num=(distance)/100, endpoint=True)
xpt -= geotransform[0]
ypt -= geotransform[3]
xsam = numpy.round_((gtinv[1] *xpt + gtinv[2] * ypt), decimals=0)
ylin = numpy.round_((gtinv[4] *xpt + gtinv[5] * ypt), decimals=0)
try:
dtmvector = dtm[ylin.astype(int),xsam.astype(int)]
#Compute elevation of projectile from a plane at the origin height
dtmvectormin = dtmvector.min()
elevation -= abs(dtmvector[0])
#Compare the projectile elevation to the dtm
dtmvector += abs(dtmvectormin)
elevation -= dtmvector
elevation += dtmvectormin
#Ignore the first 2.5km of ejection distance to ensure that we get a valid elevation check.
impact = numpy.where(elevation[250:] <= 0)
try:
#We are working at 100mpp, so the new distance is index +1
return ((impact[0][0])+1) * 100, True
except:
return distance, False
except:
print "Total travel distance exceeds model dimensions."
return distance, False
def density(m, xdata, ydata, shapefile, ppg):
'''
This function converts the lat/lon of the input map to meters
assuming an equirectangular projection. It then creates a grid at
100mpp, bins the input data into the grid (density) and creates a
histogram. Finally, a mesh grid is created and the histogram is
plotted in 2D over the basemap.
If the shapefile flag is set to true a shapefile is created by calling
the shapefile function.
Parameters:
m: A basemap mapping object
xdata: An array of x landing coordinates, ndarray
ydata: An array of y landing coordinates, ndarray
shapefile: A flag on whether or not to generate a shapefile
ppg: The number of meters per grid cell * 100
'''
#Convert from DD to m to create a mesh grid.
xmax = (m.xmax) / 0.003297790480378
xmin = (m.xmin) / 0.003297790480378
ymax = (m.ymax) / 0.003297790480378
ymin = (m.ymin) / 0.003297790480378
#Base 100mpp
nx = 1516 / int(ppg)
ny = 2123 / int(ppg)
#Convert to numpy arrays
xdata = numpy.asarray(xdata)
ydata = numpy.asarray(ydata)
#Bin the data & calculate the density
lon_bins = numpy.linspace(xdata.min(), xdata.max(), nx+1)
lat_bins = numpy.linspace(ydata.min(), ydata.max(), ny+1)
density, _, _ = numpy.histogram2d(ydata, xdata, [lat_bins, lon_bins])
#If the user wants a shapefile, pass the numpy arrays
if shapefile != None:
print "Writing model output to a shapefile."
create_shapefile(xdata, ydata, shapefile)
#Create a grid of equally spaced polygons
lon_bins_2d, lat_bins_2d = numpy.meshgrid(lon_bins, lat_bins)
if density.max() <= 3:
maxden = 5
else:
maxden = density.max()
#Mask the density array so that 0 is not plotted
density = numpy.ma.masked_where(density <=0, density)
plt.pcolormesh(lon_bins_2d,lat_bins_2d, density, cmap=cm.RdYlGn_r, vmin=0, vmax=maxden, alpha=0.5)
plt.colorbar(orientation='horizontal')
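# Editor's sketch of the binning step (shapes assumed from the numpy docs):
# histogram2d returns counts of shape (ny, nx) plus the bin edges, e.g.
# numpy.histogram2d([0.5], [0.5], [[0, 1], [0, 1]])[0] -> array([[ 1.]]),
# so density[j][i] counts landings with lat in lat_bins[j:j+2] and lon in
# lon_bins[i:i+2].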
if __name__ == '__main__':
'''This is the main section which handles program flow.'''
#Parse all of the arguments.
parser = argparse.ArgumentParser(description='Stromboli Ejection Simulation Tool v1')
parser.add_argument('--velocity', '-v', action='store',nargs='+',default=[350,425], dest='velocity', help='A range of ejection velocities. ')
parser.add_argument('--angle','-a', action='store', nargs='+',default=[30, 60], dest='angle', help='Optional: A range of ejection angles. Example: -a 30 60')
parser.add_argument('-i', '--iterations', action='store', type=int, dest='i',default=500, help='The number of ejection iterations to perform.')
parser.add_argument('--shapefile', action='store',nargs=1, default=None, dest='shapefile', help='Use this flag to generate a shapefile, in Moon_2000GCS, of the point data.')
parser.add_argument('--fast', action='store', default=None, nargs=1, dest='multi', help='Use this flag to forgo creating a visualization and just create a shapefile. This uses all available processing cores and is substantially faster.')
parser.add_argument('--ppg', action='store', default=10, dest='ppg', help='The number of pixels per grid cell. Default is 10, which generates a 1000m grid square using 100mpp WAC Vis.')
args = parser.parse_args()
#Assign the user variables to the globals, not great form, but it works.
try:
velocity = [float(args.velocity[0]),float(args.velocity[1])]
except:
velocity = [float(args.velocity[0]),float(args.velocity[0])]
num = args.i
try:
angle = [float(args.angle[0]),float(args.angle[1])]
except:
angle = [float(args.angle[0]),float(args.angle[0])]
#Read the input DTM and get geotransformation info
ds = gdal.Open('wac_dtm.tif')
dtm = ds.ReadAsArray()
geotransform = ds.GetGeoTransform()
dev = (geotransform[1]*geotransform[5] - geotransform[2]*geotransform[4])
gtinv = ( geotransform[0] , geotransform[5]/dev, - geotransform[2]/dev, geotransform[3], - geotransform[4]/dev, geotransform[1]/dev)
#Set the approximate ejection coordinates
xpt = numpy.linspace(-97.788,-97.855,num=10, endpoint=True)
ypt = numpy.linspace(-30.263,-29.851,num=10, endpoint=True)
#If the user wants to process quickly then we omit the visualization and multiprocess to generate a shapefile
if args.multi is not None:
import multiprocessing
cores = multiprocessing.cpu_count()
cores *= 2
step = num // cores
xarray = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num))
yarray = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num))
init(xarray,yarray)
jobs = []
for i in range(0, num+1, step):
| p = multiprocessing.Process(target=strom_multi, args=(xarr,yarr,slice(i, i+step)), )
jobs.append(p) | conditional_block |
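# Editor's note (sketch; strom_multi and init are defined elsewhere in the
# original script, presumably publishing the shared views as the xarr/yarr
# globals): each worker writes a disjoint slice of the RawArray-backed numpy
# views, so the processes need no locking, e.g.
# xarr = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num))
# p = multiprocessing.Process(target=strom_multi, args=(xarr, yarr, slice(0, step)))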