file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes: identifier_name, identifier_body, conditional_block, random_line_split)
---|---|---|---|---|
token.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import mysql.connector
#find userId from token
#return 0 for error
def | (userToken, cnx):
userQuery = 'SELECT user_id FROM user_token WHERE user_token = %s'
try:
userCursor = cnx.cursor()
userCursor.execute(userQuery, (userToken, ))
return userCursor.fetchone()
#return 0 for db error
except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
return '0'
finally:
userCursor.close()
#create new token
#return 1 for success
#return 0 for error
def addToken(userId, userToken, cnx):
addQuery = 'INSERT INTO user_token (user_id, user_token) VALUES (%s, %s) ON DUPLICATE KEY UPDATE user_token = %s'
try:
addCursor = cnx.cursor()
addCursor.execute(addQuery, (userId, userToken, userToken))
cnx.commit()
return '1'
except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
cnx.rollback()
return '0'
finally:
addCursor.close()
#delete token
#return 1 for success
#return 0 for fail
def deleteToken(userId, cnx):
cleanQuery = 'DELETE FROM user_token WHERE user_id = %s'
try:
cleanCursor = cnx.cursor()
cleanCursor.execute(cleanQuery, (userId, ))
cnx.commit()
return '1'
except mysql.connector.Error as err:
cnx.rollback()
print('Something went wrong: {}'.format(err))
return '0'
finally:
cleanCursor.close()
| findUser | identifier_name |
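A note on reading these rows: inside each code cell, the bare `|` marks the boundary between the prefix and suffix columns, i.e. the hole where the masked span was cut out, while the middle column carries the span itself (`findUser` in the row above, whose fim_type is identifier_name). Below is a minimal sketch of assembling one row into a fill-in-the-middle training string; the sentinel tokens are an assumption (StarCoder-style), not something this dump specifies.

```python
# Sketch: assemble one dataset row into a FIM training string.
# The sentinel tokens are assumptions (StarCoder-style); substitute
# whatever sentinels your target model actually uses.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def build_fim_sample(row: dict) -> str:
    """`row` exposes 'prefix', 'suffix', and 'middle' keys, as in this dataset."""
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])

# Toy usage, mirroring the token.py row above:
sample = build_fim_sample({"prefix": "def ", "suffix": "(userToken, cnx):",
                           "middle": "findUser"})
print(sample)
```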
client.js | var crypto = require('crypto');
var EventEmitter = require('events').EventEmitter;
var util = require('util');
var pgPass = require('pgpass');
var TypeOverrides = require('./type-overrides');
var ConnectionParameters = require('./connection-parameters');
var Query = require('./query');
var defaults = require('./defaults');
var Connection = require('./connection');
var Client = function(config) {
EventEmitter.call(this);
this.connectionParameters = new ConnectionParameters(config);
this.user = this.connectionParameters.user;
this.database = this.connectionParameters.database;
this.port = this.connectionParameters.port;
this.host = this.connectionParameters.host;
this.password = this.connectionParameters.password;
var c = config || {};
this._types = new TypeOverrides(c.types);
this.connection = c.connection || new Connection({
stream: c.stream,
ssl: this.connectionParameters.ssl
});
this.queryQueue = [];
this.binary = c.binary || defaults.binary;
this.encoding = 'utf8';
this.processID = null;
this.secretKey = null;
this.ssl = this.connectionParameters.ssl || false;
};
util.inherits(Client, EventEmitter);
Client.prototype.connect = function(callback) {
var self = this;
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send startup message
con.on('connect', function() {
if(self.ssl) {
con.requestSsl();
} else {
con.startup(self.getStartupConf());
}
});
con.on('sslconnect', function() {
con.startup(self.getStartupConf());
});
function checkPgPass(cb) |
//password request handling
con.on('authenticationCleartextPassword', checkPgPass(function() {
con.password(self.password);
}));
//password request handling
con.on('authenticationMD5Password', checkPgPass(function(msg) {
var inner = Client.md5(self.password + self.user);
var outer = Client.md5(Buffer.concat([new Buffer(inner), msg.salt]));
var md5password = "md5" + outer;
con.password(md5password);
}));
con.once('backendKeyData', function(msg) {
self.processID = msg.processID;
self.secretKey = msg.secretKey;
});
//hook up query handling events to connection
//after the connection initially becomes ready for queries
con.once('readyForQuery', function() {
//delegate rowDescription to active query
con.on('rowDescription', function(msg) {
self.activeQuery.handleRowDescription(msg);
});
//delegate dataRow to active query
con.on('dataRow', function(msg) {
self.activeQuery.handleDataRow(msg);
});
//delegate portalSuspended to active query
con.on('portalSuspended', function(msg) {
self.activeQuery.handlePortalSuspended(con);
});
//delegate emptyQuery to active query
con.on('emptyQuery', function(msg) {
self.activeQuery.handleEmptyQuery(con);
});
//delegate commandComplete to active query
con.on('commandComplete', function(msg) {
self.activeQuery.handleCommandComplete(msg, con);
});
//if a prepared statement has a name and properly parses
//we track that it's already been executed so we don't parse
//it again on the same client
con.on('parseComplete', function(msg) {
if(self.activeQuery.name) {
con.parsedStatements[self.activeQuery.name] = true;
}
});
con.on('copyInResponse', function(msg) {
self.activeQuery.handleCopyInResponse(self.connection);
});
con.on('copyData', function (msg) {
self.activeQuery.handleCopyData(msg, self.connection);
});
con.on('notification', function(msg) {
self.emit('notification', msg);
});
//process possible callback argument to Client#connect
if (callback) {
callback(null, self);
//remove callback for proper error handling
//after the connect event
callback = null;
}
self.emit('connect');
});
con.on('readyForQuery', function() {
var activeQuery = self.activeQuery;
self.activeQuery = null;
self.readyForQuery = true;
self._pulseQueryQueue();
if(activeQuery) {
activeQuery.handleReadyForQuery();
}
});
con.on('error', function(error) {
if(self.activeQuery) {
var activeQuery = self.activeQuery;
self.activeQuery = null;
return activeQuery.handleError(error, con);
}
if(!callback) {
return self.emit('error', error);
}
callback(error);
callback = null;
});
con.once('end', function() {
if ( callback ) {
// haven't received a connection message yet !
var err = new Error('Connection terminated');
callback(err);
callback = null;
return;
}
if(self.activeQuery) {
var disconnectError = new Error('Connection terminated');
self.activeQuery.handleError(disconnectError, con);
self.activeQuery = null;
}
self.emit('end');
});
con.on('notice', function(msg) {
self.emit('notice', msg);
});
};
Client.prototype.getStartupConf = function() {
var params = this.connectionParameters;
var data = {
user: params.user,
database: params.database
};
var appName = params.application_name || params.fallback_application_name;
if (appName) {
data.application_name = appName;
}
return data;
};
Client.prototype.cancel = function(client, query) {
if(client.activeQuery == query) {
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send cancel message
con.on('connect', function() {
con.cancel(client.processID, client.secretKey);
});
} else if(client.queryQueue.indexOf(query) != -1) {
client.queryQueue.splice(client.queryQueue.indexOf(query), 1);
}
};
Client.prototype.setTypeParser = function(oid, format, parseFn) {
return this._types.setTypeParser(oid, format, parseFn);
};
Client.prototype.getTypeParser = function(oid, format) {
return this._types.getTypeParser(oid, format);
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeIdentifier = function(str) {
var escaped = '"';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '"') {
escaped += c + c;
} else {
escaped += c;
}
}
escaped += '"';
return escaped;
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeLiteral = function(str) {
var hasBackslash = false;
var escaped = '\'';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '\'') {
escaped += c + c;
} else if (c === '\\') {
escaped += c + c;
hasBackslash = true;
} else {
escaped += c;
}
}
escaped += '\'';
if(hasBackslash === true) {
escaped = ' E' + escaped;
}
return escaped;
};
Client.prototype._pulseQueryQueue = function() {
if(this.readyForQuery===true) {
this.activeQuery = this.queryQueue.shift();
if(this.activeQuery) {
this.readyForQuery = false;
this.hasExecuted = true;
this.activeQuery.submit(this.connection);
} else if(this.hasExecuted) {
this.activeQuery = null;
this.emit('drain');
}
}
};
Client.prototype.copyFrom = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.copyTo = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.query = function(config, values, callback) {
//can take in strings, config object or query object
var query = (typeof config.submit == 'function') ? config :
new Query(config, values, callback);
if(this.binary && !query.binary) {
query.binary = true;
}
if(query._result) {
query._result._getTypeParser = this._types.getTypeParser.bind(this._types);
}
this.queryQueue.push(query);
this._pulseQueryQueue();
return query;
};
Client.prototype.end = function() {
this.connection.end();
};
Client.md5 = function(string) {
return crypto.createHash('md5').update(string).digest('hex');
};
// expose a Query constructor
Client.Query = Query;
module.exports = Client;
| {
return function(msg) {
if (null !== self.password) {
cb(msg);
} else {
pgPass(self.connectionParameters, function(pass){
if (undefined !== pass) {
self.connectionParameters.password = self.password = pass;
}
cb(msg);
});
}
};
} | identifier_body |
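Because prefix, middle, and suffix are stored verbatim, concatenating them should reproduce the original source file. A small sanity check along those lines, illustrative only and limited to the Python rows, where the standard-library `ast` module can confirm that the splice parses:

```python
import ast

def reconstruct(row: dict) -> str:
    """Splice a row back into the original source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

def spot_check_python_rows(rows) -> int:
    """Return how many .py rows reconstruct into syntactically valid Python."""
    ok = 0
    for row in rows:
        if not row["file_name"].endswith(".py"):
            continue
        try:
            ast.parse(reconstruct(row))
            ok += 1
        except SyntaxError as err:
            print(f"{row['file_name']}: spliced source fails to parse: {err}")
    return ok
```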
client.js | var crypto = require('crypto');
var EventEmitter = require('events').EventEmitter;
var util = require('util');
var pgPass = require('pgpass');
var TypeOverrides = require('./type-overrides');
var ConnectionParameters = require('./connection-parameters');
var Query = require('./query');
var defaults = require('./defaults');
var Connection = require('./connection');
var Client = function(config) {
EventEmitter.call(this);
this.connectionParameters = new ConnectionParameters(config);
this.user = this.connectionParameters.user;
this.database = this.connectionParameters.database;
this.port = this.connectionParameters.port;
this.host = this.connectionParameters.host;
this.password = this.connectionParameters.password;
var c = config || {};
this._types = new TypeOverrides(c.types);
this.connection = c.connection || new Connection({
stream: c.stream,
ssl: this.connectionParameters.ssl
});
this.queryQueue = [];
this.binary = c.binary || defaults.binary;
this.encoding = 'utf8';
this.processID = null;
this.secretKey = null;
this.ssl = this.connectionParameters.ssl || false;
};
util.inherits(Client, EventEmitter);
Client.prototype.connect = function(callback) {
var self = this;
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send startup message
con.on('connect', function() {
if(self.ssl) {
con.requestSsl();
} else {
con.startup(self.getStartupConf());
}
});
con.on('sslconnect', function() {
con.startup(self.getStartupConf());
});
function checkPgPass(cb) {
return function(msg) {
if (null !== self.password) {
cb(msg);
} else {
pgPass(self.connectionParameters, function(pass){
if (undefined !== pass) {
self.connectionParameters.password = self.password = pass;
}
cb(msg);
});
}
};
}
//password request handling
con.on('authenticationCleartextPassword', checkPgPass(function() {
con.password(self.password);
}));
//password request handling
con.on('authenticationMD5Password', checkPgPass(function(msg) {
var inner = Client.md5(self.password + self.user);
var outer = Client.md5(Buffer.concat([new Buffer(inner), msg.salt]));
var md5password = "md5" + outer;
con.password(md5password);
}));
con.once('backendKeyData', function(msg) {
self.processID = msg.processID;
self.secretKey = msg.secretKey;
});
//hook up query handling events to connection
//after the connection initially becomes ready for queries
con.once('readyForQuery', function() {
//delegate rowDescription to active query
con.on('rowDescription', function(msg) {
self.activeQuery.handleRowDescription(msg);
});
//delegate dataRow to active query
con.on('dataRow', function(msg) {
self.activeQuery.handleDataRow(msg);
});
//delegate portalSuspended to active query
con.on('portalSuspended', function(msg) {
self.activeQuery.handlePortalSuspended(con);
});
//delegate emptyQuery to active query
con.on('emptyQuery', function(msg) {
self.activeQuery.handleEmptyQuery(con);
});
//delegate commandComplete to active query
con.on('commandComplete', function(msg) {
self.activeQuery.handleCommandComplete(msg, con);
});
//if a prepared statement has a name and properly parses
//we track that it's already been executed so we don't parse
//it again on the same client
con.on('parseComplete', function(msg) {
if(self.activeQuery.name) {
con.parsedStatements[self.activeQuery.name] = true;
}
});
con.on('copyInResponse', function(msg) {
self.activeQuery.handleCopyInResponse(self.connection);
});
con.on('copyData', function (msg) {
self.activeQuery.handleCopyData(msg, self.connection);
});
con.on('notification', function(msg) {
self.emit('notification', msg);
});
//process possible callback argument to Client#connect
if (callback) {
callback(null, self);
//remove callback for proper error handling
//after the connect event
callback = null;
}
self.emit('connect');
});
con.on('readyForQuery', function() {
var activeQuery = self.activeQuery;
self.activeQuery = null;
self.readyForQuery = true;
self._pulseQueryQueue();
if(activeQuery) {
activeQuery.handleReadyForQuery();
}
});
con.on('error', function(error) {
if(self.activeQuery) {
var activeQuery = self.activeQuery;
self.activeQuery = null;
return activeQuery.handleError(error, con);
}
if(!callback) {
return self.emit('error', error);
}
callback(error);
callback = null;
});
con.once('end', function() {
if ( callback ) {
// haven't received a connection message yet !
var err = new Error('Connection terminated');
callback(err);
callback = null;
return;
}
if(self.activeQuery) {
var disconnectError = new Error('Connection terminated');
self.activeQuery.handleError(disconnectError, con);
self.activeQuery = null;
}
self.emit('end');
});
con.on('notice', function(msg) {
self.emit('notice', msg);
});
};
Client.prototype.getStartupConf = function() {
var params = this.connectionParameters;
var data = {
user: params.user,
database: params.database
};
var appName = params.application_name || params.fallback_application_name;
if (appName) {
data.application_name = appName;
}
return data;
};
Client.prototype.cancel = function(client, query) {
if(client.activeQuery == query) {
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send cancel message
con.on('connect', function() {
con.cancel(client.processID, client.secretKey);
});
} else if(client.queryQueue.indexOf(query) != -1) {
client.queryQueue.splice(client.queryQueue.indexOf(query), 1);
}
};
Client.prototype.setTypeParser = function(oid, format, parseFn) {
return this._types.setTypeParser(oid, format, parseFn);
};
Client.prototype.getTypeParser = function(oid, format) {
return this._types.getTypeParser(oid, format);
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeIdentifier = function(str) {
var escaped = '"';
for(var i = 0; i < str.length; i++) |
escaped += '"';
return escaped;
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeLiteral = function(str) {
var hasBackslash = false;
var escaped = '\'';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '\'') {
escaped += c + c;
} else if (c === '\\') {
escaped += c + c;
hasBackslash = true;
} else {
escaped += c;
}
}
escaped += '\'';
if(hasBackslash === true) {
escaped = ' E' + escaped;
}
return escaped;
};
Client.prototype._pulseQueryQueue = function() {
if(this.readyForQuery===true) {
this.activeQuery = this.queryQueue.shift();
if(this.activeQuery) {
this.readyForQuery = false;
this.hasExecuted = true;
this.activeQuery.submit(this.connection);
} else if(this.hasExecuted) {
this.activeQuery = null;
this.emit('drain');
}
}
};
Client.prototype.copyFrom = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.copyTo = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.query = function(config, values, callback) {
//can take in strings, config object or query object
var query = (typeof config.submit == 'function') ? config :
new Query(config, values, callback);
if(this.binary && !query.binary) {
query.binary = true;
}
if(query._result) {
query._result._getTypeParser = this._types.getTypeParser.bind(this._types);
}
this.queryQueue.push(query);
this._pulseQueryQueue();
return query;
};
Client.prototype.end = function() {
this.connection.end();
};
Client.md5 = function(string) {
return crypto.createHash('md5').update(string).digest('hex');
};
// expose a Query constructor
Client.Query = Query;
module.exports = Client;
| {
var c = str[i];
if(c === '"') {
escaped += c + c;
} else {
escaped += c;
}
} | conditional_block |
client.js | var crypto = require('crypto');
var EventEmitter = require('events').EventEmitter;
var util = require('util');
var pgPass = require('pgpass');
var TypeOverrides = require('./type-overrides');
var ConnectionParameters = require('./connection-parameters');
var Query = require('./query');
var defaults = require('./defaults');
var Connection = require('./connection');
var Client = function(config) {
EventEmitter.call(this);
this.connectionParameters = new ConnectionParameters(config);
this.user = this.connectionParameters.user;
this.database = this.connectionParameters.database;
this.port = this.connectionParameters.port;
this.host = this.connectionParameters.host;
this.password = this.connectionParameters.password;
var c = config || {};
this._types = new TypeOverrides(c.types);
this.connection = c.connection || new Connection({
stream: c.stream,
ssl: this.connectionParameters.ssl
});
this.queryQueue = [];
this.binary = c.binary || defaults.binary;
this.encoding = 'utf8';
this.processID = null;
this.secretKey = null;
this.ssl = this.connectionParameters.ssl || false;
};
util.inherits(Client, EventEmitter);
Client.prototype.connect = function(callback) {
var self = this;
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send startup message
con.on('connect', function() {
if(self.ssl) {
con.requestSsl();
} else {
con.startup(self.getStartupConf());
}
});
con.on('sslconnect', function() {
con.startup(self.getStartupConf());
});
function | (cb) {
return function(msg) {
if (null !== self.password) {
cb(msg);
} else {
pgPass(self.connectionParameters, function(pass){
if (undefined !== pass) {
self.connectionParameters.password = self.password = pass;
}
cb(msg);
});
}
};
}
//password request handling
con.on('authenticationCleartextPassword', checkPgPass(function() {
con.password(self.password);
}));
//password request handling
con.on('authenticationMD5Password', checkPgPass(function(msg) {
var inner = Client.md5(self.password + self.user);
var outer = Client.md5(Buffer.concat([new Buffer(inner), msg.salt]));
var md5password = "md5" + outer;
con.password(md5password);
}));
con.once('backendKeyData', function(msg) {
self.processID = msg.processID;
self.secretKey = msg.secretKey;
});
//hook up query handling events to connection
//after the connection initially becomes ready for queries
con.once('readyForQuery', function() {
//delegate rowDescription to active query
con.on('rowDescription', function(msg) {
self.activeQuery.handleRowDescription(msg);
});
//delegate dataRow to active query
con.on('dataRow', function(msg) {
self.activeQuery.handleDataRow(msg);
});
//delegate portalSuspended to active query
con.on('portalSuspended', function(msg) {
self.activeQuery.handlePortalSuspended(con);
});
//delegate emptyQuery to active query
con.on('emptyQuery', function(msg) {
self.activeQuery.handleEmptyQuery(con);
});
//delegate commandComplete to active query
con.on('commandComplete', function(msg) {
self.activeQuery.handleCommandComplete(msg, con);
});
//if a prepared statement has a name and properly parses
//we track that it's already been executed so we don't parse
//it again on the same client
con.on('parseComplete', function(msg) {
if(self.activeQuery.name) {
con.parsedStatements[self.activeQuery.name] = true;
}
});
con.on('copyInResponse', function(msg) {
self.activeQuery.handleCopyInResponse(self.connection);
});
con.on('copyData', function (msg) {
self.activeQuery.handleCopyData(msg, self.connection);
});
con.on('notification', function(msg) {
self.emit('notification', msg);
});
//process possible callback argument to Client#connect
if (callback) {
callback(null, self);
//remove callback for proper error handling
//after the connect event
callback = null;
}
self.emit('connect');
});
con.on('readyForQuery', function() {
var activeQuery = self.activeQuery;
self.activeQuery = null;
self.readyForQuery = true;
self._pulseQueryQueue();
if(activeQuery) {
activeQuery.handleReadyForQuery();
}
});
con.on('error', function(error) {
if(self.activeQuery) {
var activeQuery = self.activeQuery;
self.activeQuery = null;
return activeQuery.handleError(error, con);
}
if(!callback) {
return self.emit('error', error);
}
callback(error);
callback = null;
});
con.once('end', function() {
if ( callback ) {
// haven't received a connection message yet !
var err = new Error('Connection terminated');
callback(err);
callback = null;
return;
}
if(self.activeQuery) {
var disconnectError = new Error('Connection terminated');
self.activeQuery.handleError(disconnectError, con);
self.activeQuery = null;
}
self.emit('end');
});
con.on('notice', function(msg) {
self.emit('notice', msg);
});
};
Client.prototype.getStartupConf = function() {
var params = this.connectionParameters;
var data = {
user: params.user,
database: params.database
};
var appName = params.application_name || params.fallback_application_name;
if (appName) {
data.application_name = appName;
}
return data;
};
Client.prototype.cancel = function(client, query) {
if(client.activeQuery == query) {
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send cancel message
con.on('connect', function() {
con.cancel(client.processID, client.secretKey);
});
} else if(client.queryQueue.indexOf(query) != -1) {
client.queryQueue.splice(client.queryQueue.indexOf(query), 1);
}
};
Client.prototype.setTypeParser = function(oid, format, parseFn) {
return this._types.setTypeParser(oid, format, parseFn);
};
Client.prototype.getTypeParser = function(oid, format) {
return this._types.getTypeParser(oid, format);
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeIdentifier = function(str) {
var escaped = '"';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '"') {
escaped += c + c;
} else {
escaped += c;
}
}
escaped += '"';
return escaped;
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeLiteral = function(str) {
var hasBackslash = false;
var escaped = '\'';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '\'') {
escaped += c + c;
} else if (c === '\\') {
escaped += c + c;
hasBackslash = true;
} else {
escaped += c;
}
}
escaped += '\'';
if(hasBackslash === true) {
escaped = ' E' + escaped;
}
return escaped;
};
Client.prototype._pulseQueryQueue = function() {
if(this.readyForQuery===true) {
this.activeQuery = this.queryQueue.shift();
if(this.activeQuery) {
this.readyForQuery = false;
this.hasExecuted = true;
this.activeQuery.submit(this.connection);
} else if(this.hasExecuted) {
this.activeQuery = null;
this.emit('drain');
}
}
};
Client.prototype.copyFrom = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.copyTo = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.query = function(config, values, callback) {
//can take in strings, config object or query object
var query = (typeof config.submit == 'function') ? config :
new Query(config, values, callback);
if(this.binary && !query.binary) {
query.binary = true;
}
if(query._result) {
query._result._getTypeParser = this._types.getTypeParser.bind(this._types);
}
this.queryQueue.push(query);
this._pulseQueryQueue();
return query;
};
Client.prototype.end = function() {
this.connection.end();
};
Client.md5 = function(string) {
return crypto.createHash('md5').update(string).digest('hex');
};
// expose a Query constructor
Client.Query = Query;
module.exports = Client;
| checkPgPass | identifier_name |
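The same client.js source recurs across several rows because each row masks a different span: the body of checkPgPass in one row, a conditional block in another, and the identifier checkPgPass itself here. A quick way to tally how rows spread over the four fim_type classes (a sketch over any iterable of row dicts):

```python
from collections import Counter

EXPECTED_FIM_TYPES = {"identifier_name", "identifier_body",
                      "conditional_block", "random_line_split"}

def fim_type_distribution(rows) -> Counter:
    """Count rows per fim_type; the header advertises exactly 4 classes."""
    counts = Counter(row["fim_type"] for row in rows)
    unexpected = set(counts) - EXPECTED_FIM_TYPES
    if unexpected:
        print(f"unexpected fim_type labels: {unexpected}")
    return counts
```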
client.js | var crypto = require('crypto');
var EventEmitter = require('events').EventEmitter;
var util = require('util');
var pgPass = require('pgpass');
var TypeOverrides = require('./type-overrides');
var ConnectionParameters = require('./connection-parameters');
var Query = require('./query');
var defaults = require('./defaults');
var Connection = require('./connection');
var Client = function(config) {
EventEmitter.call(this);
this.connectionParameters = new ConnectionParameters(config);
this.user = this.connectionParameters.user;
this.database = this.connectionParameters.database;
this.port = this.connectionParameters.port;
this.host = this.connectionParameters.host;
this.password = this.connectionParameters.password;
var c = config || {};
this._types = new TypeOverrides(c.types);
this.connection = c.connection || new Connection({
stream: c.stream,
ssl: this.connectionParameters.ssl
});
this.queryQueue = [];
this.binary = c.binary || defaults.binary;
this.encoding = 'utf8';
this.processID = null;
this.secretKey = null;
this.ssl = this.connectionParameters.ssl || false;
};
util.inherits(Client, EventEmitter);
Client.prototype.connect = function(callback) {
var self = this;
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send startup message
con.on('connect', function() {
if(self.ssl) {
con.requestSsl();
} else {
con.startup(self.getStartupConf());
}
});
con.on('sslconnect', function() {
con.startup(self.getStartupConf());
});
function checkPgPass(cb) {
return function(msg) {
if (null !== self.password) {
cb(msg);
} else {
pgPass(self.connectionParameters, function(pass){
if (undefined !== pass) {
self.connectionParameters.password = self.password = pass;
}
cb(msg);
});
}
};
}
//password request handling
con.on('authenticationCleartextPassword', checkPgPass(function() {
con.password(self.password);
}));
//password request handling
con.on('authenticationMD5Password', checkPgPass(function(msg) {
var inner = Client.md5(self.password + self.user);
var outer = Client.md5(Buffer.concat([new Buffer(inner), msg.salt]));
var md5password = "md5" + outer;
con.password(md5password);
}));
con.once('backendKeyData', function(msg) {
self.processID = msg.processID;
self.secretKey = msg.secretKey;
});
|
//delegate rowDescription to active query
con.on('rowDescription', function(msg) {
self.activeQuery.handleRowDescription(msg);
});
//delegate dataRow to active query
con.on('dataRow', function(msg) {
self.activeQuery.handleDataRow(msg);
});
//delegate portalSuspended to active query
con.on('portalSuspended', function(msg) {
self.activeQuery.handlePortalSuspended(con);
});
//delegate emptyQuery to active query
con.on('emptyQuery', function(msg) {
self.activeQuery.handleEmptyQuery(con);
});
//delegate commandComplete to active query
con.on('commandComplete', function(msg) {
self.activeQuery.handleCommandComplete(msg, con);
});
//if a prepared statement has a name and properly parses
//we track that it's already been executed so we don't parse
//it again on the same client
con.on('parseComplete', function(msg) {
if(self.activeQuery.name) {
con.parsedStatements[self.activeQuery.name] = true;
}
});
con.on('copyInResponse', function(msg) {
self.activeQuery.handleCopyInResponse(self.connection);
});
con.on('copyData', function (msg) {
self.activeQuery.handleCopyData(msg, self.connection);
});
con.on('notification', function(msg) {
self.emit('notification', msg);
});
//process possible callback argument to Client#connect
if (callback) {
callback(null, self);
//remove callback for proper error handling
//after the connect event
callback = null;
}
self.emit('connect');
});
con.on('readyForQuery', function() {
var activeQuery = self.activeQuery;
self.activeQuery = null;
self.readyForQuery = true;
self._pulseQueryQueue();
if(activeQuery) {
activeQuery.handleReadyForQuery();
}
});
con.on('error', function(error) {
if(self.activeQuery) {
var activeQuery = self.activeQuery;
self.activeQuery = null;
return activeQuery.handleError(error, con);
}
if(!callback) {
return self.emit('error', error);
}
callback(error);
callback = null;
});
con.once('end', function() {
if ( callback ) {
// haven't received a connection message yet !
var err = new Error('Connection terminated');
callback(err);
callback = null;
return;
}
if(self.activeQuery) {
var disconnectError = new Error('Connection terminated');
self.activeQuery.handleError(disconnectError, con);
self.activeQuery = null;
}
self.emit('end');
});
con.on('notice', function(msg) {
self.emit('notice', msg);
});
};
Client.prototype.getStartupConf = function() {
var params = this.connectionParameters;
var data = {
user: params.user,
database: params.database
};
var appName = params.application_name || params.fallback_application_name;
if (appName) {
data.application_name = appName;
}
return data;
};
Client.prototype.cancel = function(client, query) {
if(client.activeQuery == query) {
var con = this.connection;
if(this.host && this.host.indexOf('/') === 0) {
con.connect(this.host + '/.s.PGSQL.' + this.port);
} else {
con.connect(this.port, this.host);
}
//once connection is established send cancel message
con.on('connect', function() {
con.cancel(client.processID, client.secretKey);
});
} else if(client.queryQueue.indexOf(query) != -1) {
client.queryQueue.splice(client.queryQueue.indexOf(query), 1);
}
};
Client.prototype.setTypeParser = function(oid, format, parseFn) {
return this._types.setTypeParser(oid, format, parseFn);
};
Client.prototype.getTypeParser = function(oid, format) {
return this._types.getTypeParser(oid, format);
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeIdentifier = function(str) {
var escaped = '"';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '"') {
escaped += c + c;
} else {
escaped += c;
}
}
escaped += '"';
return escaped;
};
// Ported from PostgreSQL 9.2.4 source code in src/interfaces/libpq/fe-exec.c
Client.prototype.escapeLiteral = function(str) {
var hasBackslash = false;
var escaped = '\'';
for(var i = 0; i < str.length; i++) {
var c = str[i];
if(c === '\'') {
escaped += c + c;
} else if (c === '\\') {
escaped += c + c;
hasBackslash = true;
} else {
escaped += c;
}
}
escaped += '\'';
if(hasBackslash === true) {
escaped = ' E' + escaped;
}
return escaped;
};
Client.prototype._pulseQueryQueue = function() {
if(this.readyForQuery===true) {
this.activeQuery = this.queryQueue.shift();
if(this.activeQuery) {
this.readyForQuery = false;
this.hasExecuted = true;
this.activeQuery.submit(this.connection);
} else if(this.hasExecuted) {
this.activeQuery = null;
this.emit('drain');
}
}
};
Client.prototype.copyFrom = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.copyTo = function (text) {
throw new Error("For PostgreSQL COPY TO/COPY FROM support npm install pg-copy-streams");
};
Client.prototype.query = function(config, values, callback) {
//can take in strings, config object or query object
var query = (typeof config.submit == 'function') ? config :
new Query(config, values, callback);
if(this.binary && !query.binary) {
query.binary = true;
}
if(query._result) {
query._result._getTypeParser = this._types.getTypeParser.bind(this._types);
}
this.queryQueue.push(query);
this._pulseQueryQueue();
return query;
};
Client.prototype.end = function() {
this.connection.end();
};
Client.md5 = function(string) {
return crypto.createHash('md5').update(string).digest('hex');
};
// expose a Query constructor
Client.Query = Query;
module.exports = Client; | //hook up query handling events to connection
//after the connection initially becomes ready for queries
con.once('readyForQuery', function() { | random_line_split |
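random_line_split rows mask a contiguous run of source lines rather than a syntactic unit; in the row above it is the comment pair plus the con.once('readyForQuery', ...) opener. How the dataset actually sampled its cut points is not documented in this dump, so the following is only an illustrative sketch of producing such a split at line boundaries:

```python
import random

def split_random_lines(source: str, rng: random.Random) -> tuple[str, str, str]:
    """Cut a random contiguous run of lines out of `source`.

    Returns (prefix, middle, suffix). Illustrative only; the real
    dataset's sampling policy (span lengths, seeding) is unknown here.
    """
    lines = source.splitlines(keepends=True)
    if len(lines) < 3:
        return source, "", ""
    start = rng.randrange(1, len(lines) - 1)
    end = rng.randrange(start + 1, len(lines))
    return "".join(lines[:start]), "".join(lines[start:end]), "".join(lines[end:])
```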
cell-does-not-clone.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
struct Foo {
x: int
}
impl Clone for Foo {
fn clone(&self) -> Foo {
// Using Cell in any way should never cause clone() to be
// invoked -- after all, that would permit evil user code to
// abuse `Cell` and trigger crashes.
panic!();
}
}
impl Copy for Foo {}
pub fn main() | {
let x = Cell::new(Foo { x: 22 });
let _y = x.get();
let _z = x.clone();
} | identifier_body |
cell-does-not-clone.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
struct Foo {
x: int
}
impl Clone for Foo {
fn | (&self) -> Foo {
// Using Cell in any way should never cause clone() to be
// invoked -- after all, that would permit evil user code to
// abuse `Cell` and trigger crashes.
panic!();
}
}
impl Copy for Foo {}
pub fn main() {
let x = Cell::new(Foo { x: 22 });
let _y = x.get();
let _z = x.clone();
}
| clone | identifier_name |
cell-does-not-clone.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
struct Foo {
x: int
}
impl Clone for Foo { | panic!();
}
}
impl Copy for Foo {}
pub fn main() {
let x = Cell::new(Foo { x: 22 });
let _y = x.get();
let _z = x.clone();
} | fn clone(&self) -> Foo {
// Using Cell in any way should never cause clone() to be
// invoked -- after all, that would permit evil user code to
// abuse `Cell` and trigger crashes.
| random_line_split |
arguments.rs | use std;
pub enum | {
Invalid,
Help,
Create(String, Vec<String>),
Extract(String),
List(String),
}
impl Arguments {
pub fn parseargs() -> Arguments {
enum Action { Create, Extract, List }
let mut action = None;
let mut archive: Option<String> = None;
let mut files: Vec<String> = Vec::new();
let mut args = std::env::args();
args.next();
while let Some(arg) = args.next() {
match arg.as_ref() {
"-h" => return Arguments::Help,
"-c" => action = Some(Action::Create),
"-x" => action = Some(Action::Extract),
"-l" => action = Some(Action::List),
"-f" => archive = args.next(),
_ => files.push(arg),
}
}
let archive = match archive {
None => return Arguments::Invalid,
Some(fname) => fname,
};
return match action {
None => Arguments::Invalid,
Some(Action::Create) => Arguments::Create(archive, files),
Some(Action::Extract) => Arguments::Extract(archive),
Some(Action::List) => Arguments::List(archive),
};
}
}
| Arguments | identifier_name |
arguments.rs | use std;
pub enum Arguments {
Invalid,
Help,
Create(String, Vec<String>),
Extract(String),
List(String),
}
impl Arguments {
pub fn parseargs() -> Arguments {
enum Action { Create, Extract, List }
let mut action = None;
let mut archive: Option<String> = None;
let mut files: Vec<String> = Vec::new();
let mut args = std::env::args();
args.next();
while let Some(arg) = args.next() {
match arg.as_ref() {
"-h" => return Arguments::Help,
"-c" => action = Some(Action::Create),
"-x" => action = Some(Action::Extract),
"-l" => action = Some(Action::List),
"-f" => archive = args.next(),
_ => files.push(arg),
}
} | let archive = match archive {
None => return Arguments::Invalid,
Some(fname) => fname,
};
return match action {
None => Arguments::Invalid,
Some(Action::Create) => Arguments::Create(archive, files),
Some(Action::Extract) => Arguments::Extract(archive),
Some(Action::List) => Arguments::List(archive),
};
}
} | random_line_split |
utils.py | #
# Utility stackables
#
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stackable import Stackable, StackableError
import json, pickle
from time import sleep
from threading import Thread, Event
from datetime import datetime, timedelta
class StackablePickler(Stackable):
'Pickle codec'
def process_input(self, data):
return pickle.loads(data)
def process_output(self, data):
return pickle.dumps(data, protocol=2)
class StackableJSON(Stackable):
'JSON codec'
def process_input(self, data):
try:
return json.loads(data)
except ValueError:
return None
def process_output(self, data):
return json.dumps(data)
class StackableWriter(Stackable):
'Reads and writes from/to a file'
def __init__(self, filename):
super(StackableWriter, self).__init__()
self.fd = open(filename, "w")
def process_input(self, data):
self.fd.write(data)
self.fd.flush()
def process_output(self, data):
return data
# def poll(self):
# return self.fd.read()
class StackablePrinter(Stackable):
'''Prints all input and output, and returns it unmodified.
Useful for quick debugging of Stackables.'''
def __init__(self, printer=print):
'Takes a printing function as argument - defaults to print'
self.printer = printer
super(StackablePrinter, self).__init__()
def process_input(self, data):
self.printer(data)
return data
def process_output(self, data):
self.printer(data)
return data
import sys
class StackableStdout(Stackable):
def process_input(self, data):
sys.stdout.write(data)
return data
def process_output(self, data):
return data
from collections import deque
class StackableInjector(Stackable):
def __init__(self):
super(StackableInjector, self).__init__()
self.in_buf = deque()
self.out_buf = deque()
def push(self, data):
self.in_buf.append(data)
def poll(self):
if len(self.in_buf):
return self.in_buf.popleft()
return None
def process_output(self, data):
self.out_buf.append(data)
return data
class StackablePoker(Stackable):
def | (self, interval=20, send=True, ping_string='__stack_ping', pong_string='__stack_pong'):
super(StackablePoker, self).__init__()
self.ping_string = ping_string.encode('utf-8')
self.pong_string = pong_string.encode('utf-8')
self.w = Event()
self.interval = interval
self.send = send
if self.send:
self.reset()
def _detach(self):
super(StackablePoker, self)._detach()
self.w.set()
def reset(self):
self.timestamp = datetime.now()
def ping():
self.w.wait(self.interval)
try:
self._feed(self.ping_string)
except:
pass
x = Thread(target=ping)
x.daemon = True
x.start()
def process_output(self, data):
if self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
def process_input(self, data):
if data == self.pong_string:
self.reset()
return None
elif data == self.ping_string:
self._feed(self.pong_string)
return None
elif self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
| __init__ | identifier_name |
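For programmatic access, rows like the ones above can be streamed with the Hugging Face `datasets` library. The dataset path below is a placeholder, since this dump does not name the published dataset, and the snippet assumes the `datasets` package is installed:

```python
from datasets import load_dataset

# "user/fim-dataset" is a placeholder path; substitute the real dataset id.
ds = load_dataset("user/fim-dataset", split="train", streaming=True)
for row in ds.take(3):
    print(row["file_name"], row["fim_type"],
          len(row["prefix"]), len(row["middle"]), len(row["suffix"]))
```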
utils.py | #
# Utility stackables
#
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stackable import Stackable, StackableError
import json, pickle
from time import sleep
from threading import Thread, Event
from datetime import datetime, timedelta
class StackablePickler(Stackable):
'Pickle codec'
def process_input(self, data):
return pickle.loads(data)
def process_output(self, data):
return pickle.dumps(data, protocol=2)
class StackableJSON(Stackable):
'JSON codec'
def process_input(self, data):
try:
return json.loads(data)
except ValueError:
return None
def process_output(self, data):
return json.dumps(data)
class StackableWriter(Stackable):
'Reads and writes from/to a file'
def __init__(self, filename):
super(StackableWriter, self).__init__()
self.fd = open(filename, "w")
def process_input(self, data):
self.fd.write(data)
self.fd.flush()
def process_output(self, data):
|
# def poll(self):
# return self.fd.read()
class StackablePrinter(Stackable):
'''Prints all input and output, and returns it unmodified.
Useful for quick debugging of Stackables.'''
def __init__(self, printer=print):
'Takes a printing function as argument - defaults to print'
self.printer = printer
super(StackablePrinter, self).__init__()
def process_input(self, data):
self.printer(data)
return data
def process_output(self, data):
self.printer(data)
return data
import sys
class StackableStdout(Stackable):
def process_input(self, data):
sys.stdout.write(data)
return data
def process_output(self, data):
return data
from collections import deque
class StackableInjector(Stackable):
def __init__(self):
super(StackableInjector, self).__init__()
self.in_buf = deque()
self.out_buf = deque()
def push(self, data):
self.in_buf.append(data)
def poll(self):
if len(self.in_buf):
return self.in_buf.popleft()
return None
def process_output(self, data):
self.out_buf.append(data)
return data
class StackablePoker(Stackable):
def __init__(self, interval=20, send=True, ping_string='__stack_ping', pong_string='__stack_pong'):
super(StackablePoker, self).__init__()
self.ping_string = ping_string.encode('utf-8')
self.pong_string = pong_string.encode('utf-8')
self.w = Event()
self.interval = interval
self.send = send
if self.send:
self.reset()
def _detach(self):
super(StackablePoker, self)._detach()
self.w.set()
def reset(self):
self.timestamp = datetime.now()
def ping():
self.w.wait(self.interval)
try:
self._feed(self.ping_string)
except:
pass
x = Thread(target=ping)
x.daemon = True
x.start()
def process_output(self, data):
if self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
def process_input(self, data):
if data == self.pong_string:
self.reset()
return None
elif data == self.ping_string:
self._feed(self.pong_string)
return None
elif self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
| return data | identifier_body |
utils.py | #
# Utility stackables
#
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stackable import Stackable, StackableError
import json, pickle
from time import sleep
from threading import Thread, Event
from datetime import datetime, timedelta
class StackablePickler(Stackable):
'Pickle codec'
def process_input(self, data):
return pickle.loads(data)
def process_output(self, data):
return pickle.dumps(data, protocol=2)
class StackableJSON(Stackable):
'JSON codec'
def process_input(self, data):
try:
return json.loads(data)
except ValueError:
return None
def process_output(self, data):
return json.dumps(data)
class StackableWriter(Stackable):
'Reads and writes from/to a file'
def __init__(self, filename):
super(StackableWriter, self).__init__()
self.fd = open(filename, "w")
def process_input(self, data):
self.fd.write(data)
self.fd.flush()
def process_output(self, data):
return data
# def poll(self):
# return self.fd.read()
class StackablePrinter(Stackable):
'''Prints all input and output, and returns it unmodified.
Useful for quick debugging of Stackables.'''
def __init__(self, printer=print):
'Takes a printing function as argument - defaults to print'
self.printer = printer
super(StackablePrinter, self).__init__()
def process_input(self, data):
self.printer(data)
return data
def process_output(self, data):
self.printer(data)
return data
import sys
class StackableStdout(Stackable):
def process_input(self, data):
sys.stdout.write(data)
return data
def process_output(self, data):
return data
from collections import deque
class StackableInjector(Stackable):
def __init__(self):
super(StackableInjector, self).__init__()
self.in_buf = deque()
self.out_buf = deque()
def push(self, data):
self.in_buf.append(data)
def poll(self):
if len(self.in_buf):
return self.in_buf.popleft()
return None
def process_output(self, data):
self.out_buf.append(data)
return data
class StackablePoker(Stackable):
def __init__(self, interval=20, send=True, ping_string='__stack_ping', pong_string='__stack_pong'):
super(StackablePoker, self).__init__()
self.ping_string = ping_string.encode('utf-8')
self.pong_string = pong_string.encode('utf-8')
self.w = Event()
self.interval = interval
self.send = send
if self.send:
self.reset()
def _detach(self):
super(StackablePoker, self)._detach()
self.w.set()
def reset(self):
self.timestamp = datetime.now()
def ping():
self.w.wait(self.interval)
try:
self._feed(self.ping_string)
except:
pass
x = Thread(target=ping)
x.daemon = True
x.start()
def process_output(self, data):
if self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
|
return data
def process_input(self, data):
if data == self.pong_string:
self.reset()
return None
elif data == self.ping_string:
self._feed(self.pong_string)
return None
elif self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
| raise StackableError('Pong not received') | conditional_block |
utils.py | #
# Utility stackables
#
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stackable import Stackable, StackableError
import json, pickle
from time import sleep
from threading import Thread, Event
from datetime import datetime, timedelta
class StackablePickler(Stackable):
'Pickle codec'
def process_input(self, data):
return pickle.loads(data)
def process_output(self, data):
return pickle.dumps(data, protocol=2)
class StackableJSON(Stackable):
'JSON codec'
def process_input(self, data):
try:
return json.loads(data)
except ValueError:
return None
def process_output(self, data):
return json.dumps(data)
class StackableWriter(Stackable):
'Reads and writes from/to a file'
def __init__(self, filename):
super(StackableWriter, self).__init__()
self.fd = open(filename, "w")
def process_input(self, data):
self.fd.write(data)
self.fd.flush()
def process_output(self, data):
return data
# def poll(self):
# return self.fd.read()
class StackablePrinter(Stackable):
'''Prints all input and output, and returns it unmodified.
Useful for quick debugging of Stackables.'''
def __init__(self, printer=print):
'Takes a printing function as argument - defaults to print'
self.printer = printer
super(StackablePrinter, self).__init__()
def process_input(self, data):
self.printer(data)
return data
def process_output(self, data):
self.printer(data)
return data
import sys
class StackableStdout(Stackable):
def process_input(self, data):
sys.stdout.write(data)
return data
def process_output(self, data):
return data
from collections import deque
class StackableInjector(Stackable):
def __init__(self):
super(StackableInjector, self).__init__()
self.in_buf = deque()
self.out_buf = deque()
def push(self, data):
self.in_buf.append(data)
def poll(self):
if len(self.in_buf):
return self.in_buf.popleft()
return None
def process_output(self, data):
self.out_buf.append(data)
return data
class StackablePoker(Stackable):
def __init__(self, interval=20, send=True, ping_string='__stack_ping', pong_string='__stack_pong'):
super(StackablePoker, self).__init__() | self.ping_string = ping_string.encode('utf-8')
self.pong_string = pong_string.encode('utf-8')
self.w = Event()
self.interval = interval
self.send = send
if self.send:
self.reset()
def _detach(self):
super(StackablePoker, self)._detach()
self.w.set()
def reset(self):
self.timestamp = datetime.now()
def ping():
self.w.wait(self.interval)
try:
self._feed(self.ping_string)
except:
pass
x = Thread(target=ping)
x.daemon = True
x.start()
def process_output(self, data):
if self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
def process_input(self, data):
if data == self.pong_string:
self.reset()
return None
elif data == self.ping_string:
self._feed(self.pong_string)
return None
elif self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data | random_line_split |
rmm_diis_old.py | """Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections') | RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG | self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G | random_line_split |
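The length ranges in the header (prefixes up to roughly 39k characters, middles up to roughly 29.4k) are worth re-deriving locally before batching, since long rows may need truncation or filtering. A minimal sketch, assuming rows are available as dicts:

```python
def length_stats(rows) -> dict:
    """Min/max character length per text column, mirroring the header stats."""
    rows = list(rows)
    stats = {}
    for col in ("file_name", "prefix", "suffix", "middle"):
        lengths = [len(row[col]) for row in rows]
        stats[col] = (min(lengths), max(lengths)) if lengths else (0, 0)
    return stats
```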
rmm_diis_old.py | """Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def | (a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG
| integrate | identifier_name |
rmm_diis_old.py | """Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
| """RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and that the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated.
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x > 0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG | identifier_body |
|
rmm_diis_old.py | """Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and that the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated.
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
|
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG
| if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0 | conditional_block |
insert.rs | use database::Database;
use database::Errors;
use database::errors::log_n_wrap;
use database::Errors::{NotFound, Conflict};
use std::vec::Vec;
use serde_json::Value;
use serde_json;
use rand;
impl Database {
/// Inserts the record at the given path.
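/// A usage sketch (hypothetical caller; the key path and JSON value are
/// illustrative assumptions):
///
/// ```ignore
/// let mut keys = vec!["users".to_string()];
/// let record: serde_json::Value = serde_json::from_str(r#"{"name": "jane"}"#).unwrap();
/// let created = db.insert(&mut keys, record)?;
/// ```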
pub fn insert(&mut self, keys: &mut Vec<String>, value: Value) -> Result<Value, Errors> |
} | {
let data = &mut self.data;
if let Ok(obj) = Self::get_object(keys, data) {
// Path found. The target must be an array for an insert; otherwise the operation would be an update, not an insert.
if let Some(ref mut array) = obj.as_array_mut() {
let mut id = rand::random();
// If an id comes with the record, use it.
if let Some(id_value) = value.get("id") {
if let Some(parsed) = id_value.as_i64() {
id = parsed;
}
}
let value_with_id = &mut value.clone();
if let Some(obj_id) = value_with_id.as_object_mut() {
obj_id.insert("id".to_string(), serde_json::to_value(id).unwrap());
}
// TODO: random id conflict must be resolved.
if let Some(idx) = Database::find_index(array, &id) {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error {:?}. \"id\" duplicates \
record at index: {:?}",
&value_with_id,
idx)))
} else {
array.push(value_with_id.clone());
info!(&self.logger, "Insert - Ok id: {:?}", &id);
debug!(&self.logger, "Insert - Value {}", &value_with_id);
Ok(value_with_id.clone())
}
} else {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error already has an object with the \
given key: {:?}",
keys)))
}
} else {
log_n_wrap(&self.logger,
NotFound(format!("Insert - Error {:?}. No record with the given path:",
keys)))
}
} | identifier_body |
insert.rs | use database::Database;
use database::Errors;
use database::errors::log_n_wrap;
use database::Errors::{NotFound, Conflict};
use std::vec::Vec;
use serde_json::Value;
use serde_json;
use rand;
impl Database {
/// Inserts the record at the given path.
pub fn | (&mut self, keys: &mut Vec<String>, value: Value) -> Result<Value, Errors> {
let data = &mut self.data;
if let Ok(obj) = Self::get_object(keys, data) {
// Path found. The target must be an array for an insert; otherwise the operation would be an update, not an insert.
if let Some(ref mut array) = obj.as_array_mut() {
let mut id = rand::random();
// If an id comes with the record, use it.
if let Some(id_value) = value.get("id") {
if let Some(parsed) = id_value.as_i64() {
id = parsed;
}
}
let value_with_id = &mut value.clone();
if let Some(obj_id) = value_with_id.as_object_mut() {
obj_id.insert("id".to_string(), serde_json::to_value(id).unwrap());
}
// TODO: random id conflict must be resolved.
if let Some(idx) = Database::find_index(array, &id) {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error {:?}. \"id\" duplicates \
record at index: {:?}",
&value_with_id,
idx)))
} else {
array.push(value_with_id.clone());
info!(&self.logger, "Insert - Ok id: {:?}", &id);
debug!(&self.logger, "Insert - Value {}", &value_with_id);
Ok(value_with_id.clone())
}
} else {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error already has an object with the \
given key: {:?}",
keys)))
}
} else {
log_n_wrap(&self.logger,
NotFound(format!("Insert - Error {:?}. No record with the given path:",
keys)))
}
}
} | insert | identifier_name |
insert.rs | use database::Database;
use database::Errors;
use database::errors::log_n_wrap;
use database::Errors::{NotFound, Conflict};
use std::vec::Vec;
use serde_json::Value;
use serde_json;
use rand;
impl Database {
/// Inserts the record at the given path.
pub fn insert(&mut self, keys: &mut Vec<String>, value: Value) -> Result<Value, Errors> {
let data = &mut self.data;
if let Ok(obj) = Self::get_object(keys, data) {
// Path found. The target must be an array for an insert; otherwise the operation would be an update, not an insert.
if let Some(ref mut array) = obj.as_array_mut() {
let mut id = rand::random();
// If an id comes with the record, use it.
if let Some(id_value) = value.get("id") {
if let Some(parsed) = id_value.as_i64() {
id = parsed;
}
}
let value_with_id = &mut value.clone();
if let Some(obj_id) = value_with_id.as_object_mut() {
obj_id.insert("id".to_string(), serde_json::to_value(id).unwrap());
}
// TODO: random id conflict must be resolved.
if let Some(idx) = Database::find_index(array, &id) {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error {:?}. \"id\" duplicates \
record at index: {:?}",
&value_with_id,
idx)))
} else {
array.push(value_with_id.clone());
info!(&self.logger, "Insert - Ok id: {:?}", &id);
debug!(&self.logger, "Insert - Value {}", &value_with_id);
Ok(value_with_id.clone())
}
} else {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error already has an object with the \
given key: {:?}",
keys)))
}
} else { | }
} | log_n_wrap(&self.logger,
NotFound(format!("Insert - Error {:?}. No record with the given path:",
keys)))
} | random_line_split |
insert.rs | use database::Database;
use database::Errors;
use database::errors::log_n_wrap;
use database::Errors::{NotFound, Conflict};
use std::vec::Vec;
use serde_json::Value;
use serde_json;
use rand;
impl Database {
/// Inserts the record at the given path.
pub fn insert(&mut self, keys: &mut Vec<String>, value: Value) -> Result<Value, Errors> {
let data = &mut self.data;
if let Ok(obj) = Self::get_object(keys, data) {
// Path found. The target must be an array for an insert; otherwise the operation would be an update, not an insert.
if let Some(ref mut array) = obj.as_array_mut() {
let mut id = rand::random();
// If an id comes with the record, use it.
if let Some(id_value) = value.get("id") |
let value_with_id = &mut value.clone();
if let Some(obj_id) = value_with_id.as_object_mut() {
obj_id.insert("id".to_string(), serde_json::to_value(id).unwrap());
}
// TODO: random id conflict must be resolved.
if let Some(idx) = Database::find_index(array, &id) {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error {:?}. \"id\" duplicates \
record at index: {:?}",
&value_with_id,
idx)))
} else {
array.push(value_with_id.clone());
info!(&self.logger, "Insert - Ok id: {:?}", &id);
debug!(&self.logger, "Insert - Value {}", &value_with_id);
Ok(value_with_id.clone())
}
} else {
log_n_wrap(&self.logger,
Conflict(format!("Insert - Error already has an object with the \
given key: {:?}",
keys)))
}
} else {
log_n_wrap(&self.logger,
NotFound(format!("Insert - Error {:?}. No record with the given path:",
keys)))
}
}
} | {
if let Some(parsed) = id_value.as_i64() {
id = parsed;
}
} | conditional_block |
calendar.stories.ts | import { Component, LOCALE_ID, OnInit } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { Meta, moduleMetadata, Story } from '@storybook/angular';
import { ALuDateAdapter, LuNativeDateAdapter, LuStringDateAdapter } from '@lucca-front/ng/core';
import { LuDateModule } from '@lucca-front/ng/date';
@Component({
selector: 'date-calendar-stories',
templateUrl: './calendar.stories.html'
})
class CalendarStories implements OnInit {
date = new Date();
ngOnInit() {
// this.date.setFullYear(2016);
}
random() |
}
export default {
title: 'NG/Date/Calendar',
component: CalendarStories,
decorators: [
moduleMetadata({
entryComponents: [CalendarStories],
imports: [
LuDateModule,
BrowserAnimationsModule,
FormsModule,
],
providers: [
{ provide: LOCALE_ID, useValue: 'en-US' },
// { provide: LOCALE_ID, useValue: 'fr-FR' },
{ provide: ALuDateAdapter, useClass: LuNativeDateAdapter },
]
})
]
} as Meta;
const template: Story<CalendarStories> = (args: CalendarStories) => ({
props: args,
});
export const basic = template.bind({});
basic.args = {
model: new Date(),
}
| {
this.date = new Date(this.date);
this.date.setDate(Math.ceil(Math.random() * 30));
} | identifier_body |
calendar.stories.ts | import { Component, LOCALE_ID, OnInit } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { Meta, moduleMetadata, Story } from '@storybook/angular';
import { ALuDateAdapter, LuNativeDateAdapter, LuStringDateAdapter } from '@lucca-front/ng/core';
import { LuDateModule } from '@lucca-front/ng/date';
@Component({
selector: 'date-calendar-stories',
templateUrl: './calendar.stories.html'
})
class | implements OnInit {
date = new Date();
ngOnInit() {
// this.date.setFullYear(2016);
}
random() {
this.date = new Date(this.date);
this.date.setDate(Math.ceil(Math.random() * 30));
}
}
export default {
title: 'NG/Date/Calendar',
component: CalendarStories,
decorators: [
moduleMetadata({
entryComponents: [CalendarStories],
imports: [
LuDateModule,
BrowserAnimationsModule,
FormsModule,
],
providers: [
{ provide: LOCALE_ID, useValue: 'en-US' },
// { provide: LOCALE_ID, useValue: 'fr-FR' },
{ provide: ALuDateAdapter, useClass: LuNativeDateAdapter },
]
})
]
} as Meta;
const template: Story<CalendarStories> = (args: CalendarStories) => ({
props: args,
});
export const basic = template.bind({});
basic.args = {
model: new Date(),
}
| CalendarStories | identifier_name |
calendar.stories.ts | import { Component, LOCALE_ID, OnInit } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { Meta, moduleMetadata, Story } from '@storybook/angular';
import { ALuDateAdapter, LuNativeDateAdapter, LuStringDateAdapter } from '@lucca-front/ng/core';
import { LuDateModule } from '@lucca-front/ng/date'; | })
class CalendarStories implements OnInit {
date = new Date();
ngOnInit() {
// this.date.setFullYear(2016);
}
random() {
this.date = new Date(this.date);
this.date.setDate(Math.ceil(Math.random() * 30));
}
}
export default {
title: 'NG/Date/Calendar',
component: CalendarStories,
decorators: [
moduleMetadata({
entryComponents: [CalendarStories],
imports: [
LuDateModule,
BrowserAnimationsModule,
FormsModule,
],
providers: [
{ provide: LOCALE_ID, useValue: 'en-US' },
// { provide: LOCALE_ID, useValue: 'fr-FR' },
{ provide: ALuDateAdapter, useClass: LuNativeDateAdapter },
]
})
]
} as Meta;
const template: Story<CalendarStories> = (args: CalendarStories) => ({
props: args,
});
export const basic = template.bind({});
basic.args = {
model: new Date(),
} |
@Component({
selector: 'date-calendar-stories',
templateUrl: './calendar.stories.html' | random_line_split |
Server.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const ServerOptions_1 = require("./ServerOptions");
const Http = require("http");
const Https = require("https");
const express = require("express");
const debug = require("debug");
const compression = require("compression");
const path = require("path");
const datefmt = require("dateformat");
const favicon = require("serve-favicon");
const fs = require("fs");
const cluster = require("cluster");
const process = require("process");
const os = require("os");
const cors = require("cors");
const logError = debug('saco:error');
const logInfo = debug('saco:info');
var ClusterMessage;
(function (ClusterMessage) {
ClusterMessage[ClusterMessage["WORKER_LISTENING"] = 0] = "WORKER_LISTENING";
})(ClusterMessage || (ClusterMessage = {}));
class Server {
constructor(options) {
this.startedWorkersCount = 0;
this.app = express();
this.options = Object.assign({}, ServerOptions_1.DEFAULT_OPTIONS, options);
this.options.workers = Math.min(Math.max(this.options.workers, 1), os.cpus().length);
this.appConfigure();
}
isHttps() {
return this.options.key != null && this.options.cert != null;
}
setMaxSockets() |
appConfigure() {
this.app.disable('x-powered-by');
if (this.options.cors) {
this.app.use(cors());
}
this.app.use(compression());
if (this.options.behindProxy) {
this.app.enable('trust proxy');
}
if (this.options.verbose) {
this.app.use((req, res, next) => {
logInfo(this.options.name, datefmt(new Date(), this.options.dateformat), 'pid:', process.pid, 'ip:', req.ip, '\t', req.method, '\t', req.url);
next();
});
}
this.app.use(this.options.assets.url, express.static(path.join(this.options.rootPath, this.options.assets.path), { maxAge: this.options.maxAge }));
this.app.get(this.options.index.url, (req, res) => {
res.setHeader('Cache-Control', `public, max-age=${this.options.maxAge}`);
res.sendFile(path.join(this.options.rootPath, this.options.index.path));
});
this.app.use((err, req, res, next) => {
logError(datefmt(new Date(), this.options.dateformat), '\t:', req.method, req.url);
logError(err.stack);
res.status(500).send('Something broke!');
});
if (this.options.favicon != null) {
this.app.use(this.options.favicon.url, favicon(path.join(this.options.rootPath, this.options.favicon.path)));
}
}
createServer() {
if (this.isHttps()) {
logInfo('Starting https server on worker %O...', process.pid);
let httpsOptions = {
key: fs.readFileSync(this.options.key),
cert: fs.readFileSync(this.options.cert)
};
return Https.createServer(httpsOptions, this.app);
}
else {
logInfo('Starting http server on worker %O...', process.pid);
return Http.createServer(this.app);
}
}
startMaster() {
var self = this;
return new Promise((resolve, reject) => {
for (let i = 0; i < self.options.workers; i++) {
cluster.fork();
}
cluster.on('exit', (worker, code, signal) => {
logInfo(`Worker %O died`, worker.process.pid);
self.startedWorkersCount--;
if (self.startedWorkersCount === 0) {
logInfo('Bye');
}
});
cluster.on('message', (worker, data) => {
logInfo('Process %O listening on port %O', data.pid, self.options.port);
self.startedWorkersCount++;
if (self.startedWorkersCount === self.options.workers) {
logInfo('Server ready');
resolve(self.startedWorkersCount);
}
});
cluster.on('online', worker => {
logInfo('Process %O just went online', worker.process.pid);
});
});
}
sendMaster(pid, msg) {
process.send({ pid, msg });
}
startWorker() {
var self = this;
return new Promise((resolve, reject) => {
self.server = self.createServer();
self.server
.listen(self.options.port, self.options.ip, () => {
self.sendMaster(process.pid, ClusterMessage.WORKER_LISTENING);
resolve();
})
.on('error', () => {
logError('Failed to start the server on port %O', self.options.port);
reject();
});
});
}
// returns a promise that resolves only after all workers
// have sent ClusterMessage.WORKER_LISTENING to the master
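// Usage sketch (hypothetical caller; option names come from the config
// reads above, the values are illustrative):
//   const { Server } = require('./Server');
//   new Server({ port: 8080, workers: 2 })
//       .start()
//       .then(count => console.log(`${count} workers listening`));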
start() {
var self = this;
return new Promise((resolve, reject) => {
if (cluster.isMaster) {
logInfo(`Starting %O master %O...`, this.options.name, process.pid);
logInfo('Options: %O', self.options);
self.setMaxSockets();
resolve(self.startMaster());
}
else {
logInfo(`Starting %O worker %O...`, this.options.name, process.pid);
self.startWorker();
}
});
}
// returns a promise that resolves only after all
// workers have sent the 'exit' event to the master
stop() {
return new Promise((resolve, reject) => {
cluster.disconnect(() => {
resolve();
});
});
}
}
exports.Server = Server;
//# sourceMappingURL=Server.js.map | {
if (this.isHttps()) {
Https.globalAgent.maxSockets = Infinity;
logInfo('Https max sockets set to %O', Https.globalAgent.maxSockets);
}
else {
Http.globalAgent.maxSockets = Infinity;
logInfo('Http max sockets set to %O', Http.globalAgent.maxSockets);
}
} | identifier_body |
Server.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const ServerOptions_1 = require("./ServerOptions");
const Http = require("http");
const Https = require("https");
const express = require("express");
const debug = require("debug");
const compression = require("compression");
const path = require("path");
const datefmt = require("dateformat");
const favicon = require("serve-favicon");
const fs = require("fs");
const cluster = require("cluster");
const process = require("process");
const os = require("os");
const cors = require("cors");
const logError = debug('saco:error');
const logInfo = debug('saco:info');
var ClusterMessage;
(function (ClusterMessage) {
ClusterMessage[ClusterMessage["WORKER_LISTENING"] = 0] = "WORKER_LISTENING";
})(ClusterMessage || (ClusterMessage = {}));
class Server {
constructor(options) {
this.startedWorkersCount = 0;
this.app = express();
this.options = Object.assign({}, ServerOptions_1.DEFAULT_OPTIONS, options);
this.options.workers = Math.min(Math.max(this.options.workers, 1), os.cpus().length);
this.appConfigure();
}
isHttps() {
return this.options.key != null && this.options.cert != null;
}
setMaxSockets() {
if (this.isHttps()) {
Https.globalAgent.maxSockets = Infinity;
logInfo('Https max sockets set to %O', Https.globalAgent.maxSockets);
}
else {
Http.globalAgent.maxSockets = Infinity;
logInfo('Http max sockets set to %O', Http.globalAgent.maxSockets);
}
}
appConfigure() {
this.app.disable('x-powered-by');
if (this.options.cors) |
this.app.use(compression());
if (this.options.behindProxy) {
this.app.enable('trust proxy');
}
if (this.options.verbose) {
this.app.use((req, res, next) => {
logInfo(this.options.name, datefmt(new Date(), this.options.dateformat), 'pid:', process.pid, 'ip:', req.ip, '\t', req.method, '\t', req.url);
next();
});
}
this.app.use(this.options.assets.url, express.static(path.join(this.options.rootPath, this.options.assets.path), { maxAge: this.options.maxAge }));
this.app.get(this.options.index.url, (req, res) => {
res.setHeader('Cache-Control', `public, max-age=${this.options.maxAge}`);
res.sendFile(path.join(this.options.rootPath, this.options.index.path));
});
this.app.use((err, req, res, next) => {
logError(datefmt(new Date(), this.options.dateformat), '\t:', req.method, req.url);
logError(err.stack);
res.status(500).send('Something broke!');
});
if (this.options.favicon != null) {
this.app.use(this.options.favicon.url, favicon(path.join(this.options.rootPath, this.options.favicon.path)));
}
}
createServer() {
if (this.isHttps()) {
logInfo('Starting https server on worker %O...', process.pid);
let httpsOptions = {
key: fs.readFileSync(this.options.key),
cert: fs.readFileSync(this.options.cert)
};
return Https.createServer(httpsOptions, this.app);
}
else {
logInfo('Starting http server on worker %O...', process.pid);
return Http.createServer(this.app);
}
}
startMaster() {
var self = this;
return new Promise((resolve, reject) => {
for (let i = 0; i < self.options.workers; i++) {
cluster.fork();
}
cluster.on('exit', (worker, code, signal) => {
logInfo(`Worker %O died`, worker.process.pid);
self.startedWorkersCount--;
if (self.startedWorkersCount === 0) {
logInfo('Bye');
}
});
cluster.on('message', (worker, data) => {
logInfo('Process %O listening on port %O', data.pid, self.options.port);
self.startedWorkersCount++;
if (self.startedWorkersCount === self.options.workers) {
logInfo('Server ready');
resolve(self.startedWorkersCount);
}
});
cluster.on('online', worker => {
logInfo('Process %O just went online', worker.process.pid);
});
});
}
sendMaster(pid, msg) {
process.send({ pid, msg });
}
startWorker() {
var self = this;
return new Promise((resolve, reject) => {
self.server = self.createServer();
self.server
.listen(self.options.port, self.options.ip, () => {
self.sendMaster(process.pid, ClusterMessage.WORKER_LISTENING);
resolve();
})
.on('error', () => {
logError('Failed to start the server on port %O', self.options.port);
reject();
});
});
}
// returns a promise that resolves only after all workers
// have sent ClusterMessage.WORKER_LISTENING to the master
start() {
var self = this;
return new Promise((resolve, reject) => {
if (cluster.isMaster) {
logInfo(`Starting %O master %O...`, this.options.name, process.pid);
logInfo('Options: %O', self.options);
self.setMaxSockets();
resolve(self.startMaster());
}
else {
logInfo(`Starting %O worker %O...`, this.options.name, process.pid);
self.startWorker();
}
});
}
// returns a promise that resolves only after all
// workers have sent the 'exit' event to the master
stop() {
return new Promise((resolve, reject) => {
cluster.disconnect(() => {
resolve();
});
});
}
}
exports.Server = Server;
//# sourceMappingURL=Server.js.map | {
this.app.use(cors());
} | conditional_block |
Server.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const ServerOptions_1 = require("./ServerOptions");
const Http = require("http");
const Https = require("https");
const express = require("express");
const debug = require("debug");
const compression = require("compression");
const path = require("path");
const datefmt = require("dateformat");
const favicon = require("serve-favicon");
const fs = require("fs");
const cluster = require("cluster");
const process = require("process");
const os = require("os");
const cors = require("cors");
const logError = debug('saco:error');
const logInfo = debug('saco:info');
var ClusterMessage;
(function (ClusterMessage) {
ClusterMessage[ClusterMessage["WORKER_LISTENING"] = 0] = "WORKER_LISTENING";
})(ClusterMessage || (ClusterMessage = {}));
class Server {
constructor(options) {
this.startedWorkersCount = 0;
this.app = express();
this.options = Object.assign({}, ServerOptions_1.DEFAULT_OPTIONS, options);
this.options.workers = Math.min(Math.max(this.options.workers, 1), os.cpus().length);
this.appConfigure();
}
isHttps() {
return this.options.key != null && this.options.cert != null;
}
setMaxSockets() {
if (this.isHttps()) {
Https.globalAgent.maxSockets = Infinity;
logInfo('Https max sockets set to %O', Https.globalAgent.maxSockets);
}
else {
Http.globalAgent.maxSockets = Infinity;
logInfo('Http max sockets set to %O', Http.globalAgent.maxSockets);
}
}
appConfigure() {
this.app.disable('x-powered-by');
if (this.options.cors) {
this.app.use(cors());
}
this.app.use(compression());
if (this.options.behindProxy) {
this.app.enable('trust proxy');
}
if (this.options.verbose) {
this.app.use((req, res, next) => {
logInfo(this.options.name, datefmt(new Date(), this.options.dateformat), 'pid:', process.pid, 'ip:', req.ip, '\t', req.method, '\t', req.url);
next();
});
}
this.app.use(this.options.assets.url, express.static(path.join(this.options.rootPath, this.options.assets.path), { maxAge: this.options.maxAge }));
this.app.get(this.options.index.url, (req, res) => {
res.setHeader('Cache-Control', `public, max-age=${this.options.maxAge}`);
res.sendFile(path.join(this.options.rootPath, this.options.index.path));
});
this.app.use((err, req, res, next) => {
logError(datefmt(new Date(), this.options.dateformat), '\t:', req.method, req.url);
logError(err.stack);
res.status(500).send('Something broke!');
});
if (this.options.favicon != null) {
this.app.use(this.options.favicon.url, favicon(path.join(this.options.rootPath, this.options.favicon.path)));
}
}
createServer() {
if (this.isHttps()) {
logInfo('Starting https server on worker %O...', process.pid);
let httpsOptions = {
key: fs.readFileSync(this.options.key),
cert: fs.readFileSync(this.options.cert)
};
return Https.createServer(httpsOptions, this.app);
}
else {
logInfo('Starting http server on worker %O...', process.pid);
return Http.createServer(this.app);
}
}
startMaster() {
var self = this;
return new Promise((resolve, reject) => {
for (let i = 0; i < self.options.workers; i++) {
cluster.fork();
}
cluster.on('exit', (worker, code, signal) => {
logInfo(`Worker %O died`, worker.process.pid);
self.startedWorkersCount--;
if (self.startedWorkersCount === 0) {
logInfo('Bye');
}
});
cluster.on('message', (worker, data) => {
logInfo('Process %O listening on port %O', data.pid, self.options.port);
self.startedWorkersCount++;
if (self.startedWorkersCount === self.options.workers) {
logInfo('Server ready');
resolve(self.startedWorkersCount);
}
});
cluster.on('online', worker => {
logInfo('Process %O just went online', worker.process.pid);
});
});
}
sendMaster(pid, msg) {
process.send({ pid, msg });
}
startWorker() {
var self = this;
return new Promise((resolve, reject) => {
self.server = self.createServer();
self.server
.listen(self.options.port, self.options.ip, () => {
self.sendMaster(process.pid, ClusterMessage.WORKER_LISTENING);
resolve();
})
.on('error', () => {
logError('Failed to start the server on port %O', self.options.port);
reject();
});
});
}
// returns a promise that resolves only after all workers
// have sent ClusterMessage.WORKER_LISTENING to the master
start() {
var self = this;
return new Promise((resolve, reject) => {
if (cluster.isMaster) {
logInfo(`Starting %O master %O...`, this.options.name, process.pid);
logInfo('Options: %O', self.options);
self.setMaxSockets();
resolve(self.startMaster());
}
else {
logInfo(`Starting %O worker %O...`, this.options.name, process.pid);
self.startWorker();
}
});
}
// returns a promise that resolves only after all
// workers have sent the 'exit' event to the master
stop() {
return new Promise((resolve, reject) => {
cluster.disconnect(() => { | resolve();
});
});
}
}
exports.Server = Server;
//# sourceMappingURL=Server.js.map | random_line_split |
|
Server.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const ServerOptions_1 = require("./ServerOptions");
const Http = require("http");
const Https = require("https");
const express = require("express");
const debug = require("debug");
const compression = require("compression");
const path = require("path");
const datefmt = require("dateformat");
const favicon = require("serve-favicon");
const fs = require("fs");
const cluster = require("cluster");
const process = require("process");
const os = require("os");
const cors = require("cors");
const logError = debug('saco:error');
const logInfo = debug('saco:info');
var ClusterMessage;
(function (ClusterMessage) {
ClusterMessage[ClusterMessage["WORKER_LISTENING"] = 0] = "WORKER_LISTENING";
})(ClusterMessage || (ClusterMessage = {}));
class Server {
constructor(options) {
this.startedWorkersCount = 0;
this.app = express();
this.options = Object.assign({}, ServerOptions_1.DEFAULT_OPTIONS, options);
this.options.workers = Math.min(Math.max(this.options.workers, 1), os.cpus().length);
this.appConfigure();
}
isHttps() {
return this.options.key != null && this.options.cert != null;
}
setMaxSockets() {
if (this.isHttps()) {
Https.globalAgent.maxSockets = Infinity;
logInfo('Https max sockets set to %O', Https.globalAgent.maxSockets);
}
else {
Http.globalAgent.maxSockets = Infinity;
logInfo('Http max sockets set to %O', Http.globalAgent.maxSockets);
}
}
appConfigure() {
this.app.disable('x-powered-by');
if (this.options.cors) {
this.app.use(cors());
}
this.app.use(compression());
if (this.options.behindProxy) {
this.app.enable('trust proxy');
}
if (this.options.verbose) {
this.app.use((req, res, next) => {
logInfo(this.options.name, datefmt(new Date(), this.options.dateformat), 'pid:', process.pid, 'ip:', req.ip, '\t', req.method, '\t', req.url);
next();
});
}
this.app.use(this.options.assets.url, express.static(path.join(this.options.rootPath, this.options.assets.path), { maxAge: this.options.maxAge }));
this.app.get(this.options.index.url, (req, res) => {
res.setHeader('Cache-Control', `public, max-age=${this.options.maxAge}`);
res.sendFile(path.join(this.options.rootPath, this.options.index.path));
});
this.app.use((err, req, res, next) => {
logError(datefmt(new Date(), this.options.dateformat), '\t:', req.method, req.url);
logError(err.stack);
res.status(500).send('Something broke!');
});
if (this.options.favicon != null) {
this.app.use(this.options.favicon.url, favicon(path.join(this.options.rootPath, this.options.favicon.path)));
}
}
createServer() {
if (this.isHttps()) {
logInfo('Starting https server on worker %O...', process.pid);
let httpsOptions = {
key: fs.readFileSync(this.options.key),
cert: fs.readFileSync(this.options.cert)
};
return Https.createServer(httpsOptions, this.app);
}
else {
logInfo('Starting http server on worker %O...', process.pid);
return Http.createServer(this.app);
}
}
startMaster() {
var self = this;
return new Promise((resolve, reject) => {
for (let i = 0; i < self.options.workers; i++) {
cluster.fork();
}
cluster.on('exit', (worker, code, signal) => {
logInfo(`Worker %O died`, worker.process.pid);
self.startedWorkersCount--;
if (self.startedWorkersCount === 0) {
logInfo('Bye');
}
});
cluster.on('message', (worker, data) => {
logInfo('Process %O listening on port %O', data.pid, self.options.port);
self.startedWorkersCount++;
if (self.startedWorkersCount === self.options.workers) {
logInfo('Server ready');
resolve(self.startedWorkersCount);
}
});
cluster.on('online', worker => {
logInfo('Process %O just went online', worker.process.pid);
});
});
}
| (pid, msg) {
process.send({ pid, msg });
}
startWorker() {
var self = this;
return new Promise((resolve, reject) => {
self.server = self.createServer();
self.server
.listen(self.options.port, self.options.ip, () => {
self.sendMaster(process.pid, ClusterMessage.WORKER_LISTENING);
resolve();
})
.on('error', () => {
logError('Failed to start the server on port %O', self.options.port);
reject();
});
});
}
// returns a promise that resolves only after all workers
// have sent ClusterMessage.WORKER_LISTENING to the master
start() {
var self = this;
return new Promise((resolve, reject) => {
if (cluster.isMaster) {
logInfo(`Starting %O master %O...`, this.options.name, process.pid);
logInfo('Options: %O', self.options);
self.setMaxSockets();
resolve(self.startMaster());
}
else {
logInfo(`Starting %O worker %O...`, this.options.name, process.pid);
self.startWorker();
}
});
}
// returns a promise that resolves only after all
// workers have sent the 'exit' event to the master
stop() {
return new Promise((resolve, reject) => {
cluster.disconnect(() => {
resolve();
});
});
}
}
exports.Server = Server;
//# sourceMappingURL=Server.js.map | sendMaster | identifier_name |
register.component.ts | import { Component, OnInit } from '@angular/core';
import { Response } from '@angular/http';
import { Router, ActivatedRoute } from '@angular/router';
import { RegisterModel } from '../../core/models/register-model';
import { AccountService } from '../../core/account/account.service';
import { ControlBase } from '../../shared/forms/control-base';
import { ControlTextbox } from '../../shared/forms/control-textbox';
@Component({
selector: 'appc-register',
templateUrl: './register.component.html'
})
export class RegisterComponent implements OnInit {
public errors: string[] = [];
public controls: Array<ControlBase<any>>;
constructor(public accountService: AccountService, public router: Router, public route: ActivatedRoute) { }
public register(model: RegisterModel): void {
this.accountService.register(model)
.subscribe((res: Response) => {
this.router.navigate(['../registerconfirmation'], { relativeTo: this.route, queryParams: { emailConfirmed: true } });
},
(errors: string[]) => {
this.errors = errors;
});
};
public ngOnInit() {
const controls: Array<ControlBase<any>> = [
new ControlTextbox({
key: 'username',
label: 'Username',
placeholder: 'Username',
value: '',
type: 'textbox',
required: true,
order: 1
}),
new ControlTextbox({
key: 'firstname',
label: 'Firstname',
placeholder: 'Firstname',
value: '',
type: 'textbox',
required: true,
order: 2
}),
| label: 'Lastname',
placeholder: 'Lastname',
value: '',
type: 'textbox',
required: true,
order: 3
}),
new ControlTextbox({
key: 'email',
label: 'Email',
placeholder: 'Email',
value: '',
type: 'email',
required: true,
order: 4
}),
new ControlTextbox({
key: 'password',
label: 'Password',
placeholder: 'Password',
value: '',
type: 'password',
required: true,
order: 5
})
];
this.controls = controls;
}
} | new ControlTextbox({
key: 'lastname',
| random_line_split |
register.component.ts | import { Component, OnInit } from '@angular/core';
import { Response } from '@angular/http';
import { Router, ActivatedRoute } from '@angular/router';
import { RegisterModel } from '../../core/models/register-model';
import { AccountService } from '../../core/account/account.service';
import { ControlBase } from '../../shared/forms/control-base';
import { ControlTextbox } from '../../shared/forms/control-textbox';
@Component({
selector: 'appc-register',
templateUrl: './register.component.html'
})
export class RegisterComponent implements OnInit {
public errors: string[] = [];
public controls: Array<ControlBase<any>>;
co | ublic accountService: AccountService, public router: Router, public route: ActivatedRoute) { }
public register(model: RegisterModel): void {
this.accountService.register(model)
.subscribe((res: Response) => {
this.router.navigate(['../registerconfirmation'], { relativeTo: this.route, queryParams: { emailConfirmed: true } });
},
(errors: string[]) => {
this.errors = errors;
});
};
public ngOnInit() {
const controls: Array<ControlBase<any>> = [
new ControlTextbox({
key: 'username',
label: 'Username',
placeholder: 'Username',
value: '',
type: 'textbox',
required: true,
order: 1
}),
new ControlTextbox({
key: 'firstname',
label: 'Firstname',
placeholder: 'Firstname',
value: '',
type: 'textbox',
required: true,
order: 2
}),
new ControlTextbox({
key: 'lastname',
label: 'Lastname',
placeholder: 'Lastname',
value: '',
type: 'textbox',
required: true,
order: 3
}),
new ControlTextbox({
key: 'email',
label: 'Email',
placeholder: 'Email',
value: '',
type: 'email',
required: true,
order: 4
}),
new ControlTextbox({
key: 'password',
label: 'Password',
placeholder: 'Password',
value: '',
type: 'password',
required: true,
order: 5
})
];
this.controls = controls;
}
}
| nstructor(p | identifier_name |
register.component.ts | import { Component, OnInit } from '@angular/core';
import { Response } from '@angular/http';
import { Router, ActivatedRoute } from '@angular/router';
import { RegisterModel } from '../../core/models/register-model';
import { AccountService } from '../../core/account/account.service';
import { ControlBase } from '../../shared/forms/control-base';
import { ControlTextbox } from '../../shared/forms/control-textbox';
@Component({
selector: 'appc-register',
templateUrl: './register.component.html'
})
export class RegisterComponent implements OnInit {
public errors: string[] = [];
public controls: Array<ControlBase<any>>;
constructor(public accountService: AccountService, public router: Router, public route: ActivatedRoute) { }
public register(model: RegisterModel): void {
this.accountService.register(model)
.subscribe((res: Response) => {
this.router.navigate(['../registerconfirmation'], { relativeTo: this.route, queryParams: { emailConfirmed: true } });
},
(errors: string[]) => {
this.errors = errors;
});
};
public ngOnInit() {
|
}
|
const controls: Array<ControlBase<any>> = [
new ControlTextbox({
key: 'username',
label: 'Username',
placeholder: 'Username',
value: '',
type: 'textbox',
required: true,
order: 1
}),
new ControlTextbox({
key: 'firstname',
label: 'Firstname',
placeholder: 'Firstname',
value: '',
type: 'textbox',
required: true,
order: 2
}),
new ControlTextbox({
key: 'lastname',
label: 'Lastname',
placeholder: 'Lastname',
value: '',
type: 'textbox',
required: true,
order: 3
}),
new ControlTextbox({
key: 'email',
label: 'Email',
placeholder: 'Email',
value: '',
type: 'email',
required: true,
order: 4
}),
new ControlTextbox({
key: 'password',
label: 'Password',
placeholder: 'Password',
value: '',
type: 'password',
required: true,
order: 5
})
];
this.controls = controls;
}
| identifier_body |
demo_ystockquote.py | # To make print work on Python 2 and 3
from __future__ import print_function
import ystockquote as ysq
def _main():
for s in ["NA.TO", "XBB.TO", "NOU.V", "AP-UN.TO", "BRK-A", "AAPL"]:
print("=============================================")
print("s: {}".format(s))
print("get_name: {}".format(ysq.get_name(s)))
print("get_price: {}".format(ysq.get_price(s)))
print("get_volume: {}".format(ysq.get_volume(s)))
print("get_stock_exchange: {}".format(ysq.get_stock_exchange(s)))
print("get_market_cap: {}".format(ysq.get_market_cap(s))) | print("get_price_earnings_ratio: {}".format(ysq.get_price_earnings_ratio(s)))
print("get_52_week_low: {}".format(ysq.get_52_week_low(s)))
print("get_52_week_high: {}".format(ysq.get_52_week_high(s)))
print("get_currency: {}".format(ysq.get_currency(s)))
if __name__ == '__main__':
_main() | print("get_dividend_yield: {}".format(ysq.get_dividend_yield(s))) | random_line_split |
demo_ystockquote.py | # To make print work on Python 2 and 3
from __future__ import print_function
import ystockquote as ysq
def | ():
for s in ["NA.TO", "XBB.TO", "NOU.V", "AP-UN.TO", "BRK-A", "AAPL"]:
print("=============================================")
print("s: {}".format(s))
print("get_name: {}".format(ysq.get_name(s)))
print("get_price: {}".format(ysq.get_price(s)))
print("get_volume: {}".format(ysq.get_volume(s)))
print("get_stock_exchange: {}".format(ysq.get_stock_exchange(s)))
print("get_market_cap: {}".format(ysq.get_market_cap(s)))
print("get_dividend_yield: {}".format(ysq.get_dividend_yield(s)))
print("get_price_earnings_ratio: {}".format(ysq.get_price_earnings_ratio(s)))
print("get_52_week_low: {}".format(ysq.get_52_week_low(s)))
print("get_52_week_high: {}".format(ysq.get_52_week_high(s)))
print("get_currency: {}".format(ysq.get_currency(s)))
if __name__ == '__main__':
_main()
| _main | identifier_name |
demo_ystockquote.py | # To make print work on Python 2 and 3
from __future__ import print_function
import ystockquote as ysq
def _main():
for s in ["NA.TO", "XBB.TO", "NOU.V", "AP-UN.TO", "BRK-A", "AAPL"]:
print("=============================================")
print("s: {}".format(s))
print("get_name: {}".format(ysq.get_name(s)))
print("get_price: {}".format(ysq.get_price(s)))
print("get_volume: {}".format(ysq.get_volume(s)))
print("get_stock_exchange: {}".format(ysq.get_stock_exchange(s)))
print("get_market_cap: {}".format(ysq.get_market_cap(s)))
print("get_dividend_yield: {}".format(ysq.get_dividend_yield(s)))
print("get_price_earnings_ratio: {}".format(ysq.get_price_earnings_ratio(s)))
print("get_52_week_low: {}".format(ysq.get_52_week_low(s)))
print("get_52_week_high: {}".format(ysq.get_52_week_high(s)))
print("get_currency: {}".format(ysq.get_currency(s)))
if __name__ == '__main__':
| _main() | conditional_block |
|
demo_ystockquote.py | # To make print work on Python 2 and 3
from __future__ import print_function
import ystockquote as ysq
def _main():
|
if __name__ == '__main__':
_main()
| for s in ["NA.TO", "XBB.TO", "NOU.V", "AP-UN.TO", "BRK-A", "AAPL"]:
print("=============================================")
print("s: {}".format(s))
print("get_name: {}".format(ysq.get_name(s)))
print("get_price: {}".format(ysq.get_price(s)))
print("get_volume: {}".format(ysq.get_volume(s)))
print("get_stock_exchange: {}".format(ysq.get_stock_exchange(s)))
print("get_market_cap: {}".format(ysq.get_market_cap(s)))
print("get_dividend_yield: {}".format(ysq.get_dividend_yield(s)))
print("get_price_earnings_ratio: {}".format(ysq.get_price_earnings_ratio(s)))
print("get_52_week_low: {}".format(ysq.get_52_week_low(s)))
print("get_52_week_high: {}".format(ysq.get_52_week_high(s)))
print("get_currency: {}".format(ysq.get_currency(s))) | identifier_body |
__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
# Copyright (c) 2015 Wikimedia Foundation
# EXIF optimizer, aims to reduce thumbnail weight as much as possible
# while retaining some critical metadata
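# Configuration sketch (the keys match the config reads in __init__ below;
# the values are illustrative assumptions, not shipped defaults):
#   EXIFTOOL_PATH = '/usr/bin/exiftool'
#   EXIF_FIELDS_TO_KEEP = ['icc_profile', 'copyright', 'artist']
#   EXIF_TINYRGB_PATH = '/etc/thumbor/tinyrgb.icc'
#   EXIF_TINYRGB_ICC_REPLACE = 'sRGB IEC61966-2.1'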
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger
class Optimizer(BaseOptimizer):
def __init__(self, context):
|
def should_run(self, image_extension, buffer):
good_extension = 'jpg' in image_extension or 'jpeg' in image_extension
return good_extension and self.runnable
def optimize(self, buffer, input_file, output_file):
exif_fields = self.exif_fields_to_keep
# TinyRGB is a lightweight sRGB swap-in replacement created by Facebook
# If the image is sRGB, swap the existing heavy profile for TinyRGB
# Only works if icc_profile is configured to be preserved in
# EXIF_FIELDS_TO_KEEP
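# The probe below is roughly equivalent to this shell invocation (sketch):
#   exiftool -DeviceModelDesc -S -T input.jpg
# and its output is compared against EXIF_TINYRGB_ICC_REPLACE.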
if (self.tinyrgb_path):
output = subprocess.check_output([
self.exiftool_path,
'-DeviceModelDesc',
'-S',
'-T',
input_file
])
logger.debug("[EXIFTOOL] exiftool output: " + output)
if (output.rstrip().lower() == self.tinyrgb_icc_replace.lower()):
new_icc = 'icc_profile<=%s' % (
self.tinyrgb_path
)
exif_fields = [
new_icc if i == 'icc_profile' else i for i in exif_fields
]
# Strip all EXIF fields except the ones we want to
# explicitly copy over
command = [
self.exiftool_path,
input_file,
'-all=',
'-tagsFromFile',
'@'
]
command += ['-{0}'.format(i) for i in exif_fields]
command += [
'-m',
'-o',
'-'
]
output = open(output_file, 'w')
subprocess.call(command, stdout=output)
| super(Optimizer, self).__init__(context)
self.runnable = True
self.exiftool_path = self.context.config.EXIFTOOL_PATH
self.exif_fields_to_keep = self.context.config.EXIF_FIELDS_TO_KEEP
self.tinyrgb_path = self.context.config.EXIF_TINYRGB_PATH
self.tinyrgb_icc_replace = self.context.config.EXIF_TINYRGB_ICC_REPLACE
if not (os.path.isfile(self.exiftool_path)
and os.access(self.exiftool_path, os.X_OK)):
logger.error(
"ERROR exiftool path '{0}' is not accessible"
.format(self.exiftool_path)
)
self.runnable = False
if not (os.path.isfile(self.tinyrgb_path)
and os.access(self.tinyrgb_path, os.R_OK)):
logger.error(
"ERROR tinyrgb path '{0}' is not accessible"
.format(self.tinyrgb_path)
)
self.tinyrgb_path = False | identifier_body |
__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
# Copyright (c) 2015 Wikimedia Foundation
# EXIF optimizer, aims to reduce thumbnail weight as much as possible
# while retaining some critical metadata
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger
class Optimizer(BaseOptimizer):
def __init__(self, context):
super(Optimizer, self).__init__(context)
self.runnable = True
self.exiftool_path = self.context.config.EXIFTOOL_PATH
self.exif_fields_to_keep = self.context.config.EXIF_FIELDS_TO_KEEP
self.tinyrgb_path = self.context.config.EXIF_TINYRGB_PATH
self.tinyrgb_icc_replace = self.context.config.EXIF_TINYRGB_ICC_REPLACE
if not (os.path.isfile(self.exiftool_path)
and os.access(self.exiftool_path, os.X_OK)):
logger.error(
"ERROR exiftool path '{0}' is not accessible"
.format(self.exiftool_path)
)
self.runnable = False
if not (os.path.isfile(self.tinyrgb_path)
and os.access(self.tinyrgb_path, os.R_OK)):
logger.error(
"ERROR tinyrgb path '{0}' is not accessible"
.format(self.tinyrgb_path)
)
self.tinyrgb_path = False
def should_run(self, image_extension, buffer):
good_extension = 'jpg' in image_extension or 'jpeg' in image_extension
return good_extension and self.runnable
def optimize(self, buffer, input_file, output_file):
exif_fields = self.exif_fields_to_keep
# TinyRGB is a lightweight sRGB swap-in replacement created by Facebook
# If the image is sRGB, swap the existing heavy profile for TinyRGB
# Only works if icc_profile is configured to be preserved in
# EXIF_FIELDS_TO_KEEP
if (self.tinyrgb_path): | self.exiftool_path,
'-DeviceModelDesc',
'-S',
'-T',
input_file
])
logger.debug("[EXIFTOOL] exiftool output: " + output)
if (output.rstrip().lower() == self.tinyrgb_icc_replace.lower()):
new_icc = 'icc_profile<=%s' % (
self.tinyrgb_path
)
exif_fields = [
new_icc if i == 'icc_profile' else i for i in exif_fields
]
# Strip all EXIF fields except the ones we want to
# explicitly copy over
command = [
self.exiftool_path,
input_file,
'-all=',
'-tagsFromFile',
'@'
]
command += ['-{0}'.format(i) for i in exif_fields]
command += [
'-m',
'-o',
'-'
]
output = open(output_file, 'w')
subprocess.call(command, stdout=output) | output = subprocess.check_output([ | random_line_split |
__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
# Copyright (c) 2015 Wikimedia Foundation
# EXIF optimizer, aims to reduce thumbnail weight as much as possible
# while retaining some critical metadata
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger
class Optimizer(BaseOptimizer):
def __init__(self, context):
super(Optimizer, self).__init__(context)
self.runnable = True
self.exiftool_path = self.context.config.EXIFTOOL_PATH
self.exif_fields_to_keep = self.context.config.EXIF_FIELDS_TO_KEEP
self.tinyrgb_path = self.context.config.EXIF_TINYRGB_PATH
self.tinyrgb_icc_replace = self.context.config.EXIF_TINYRGB_ICC_REPLACE
if not (os.path.isfile(self.exiftool_path)
and os.access(self.exiftool_path, os.X_OK)):
logger.error(
"ERROR exiftool path '{0}' is not accessible"
.format(self.exiftool_path)
)
self.runnable = False
if not (os.path.isfile(self.tinyrgb_path)
and os.access(self.tinyrgb_path, os.R_OK)):
logger.error(
"ERROR tinyrgb path '{0}' is not accessible"
.format(self.tinyrgb_path)
)
self.tinyrgb_path = False
def should_run(self, image_extension, buffer):
good_extension = 'jpg' in image_extension or 'jpeg' in image_extension
return good_extension and self.runnable
def | (self, buffer, input_file, output_file):
exif_fields = self.exif_fields_to_keep
# TinyRGB is a lightweight sRGB swap-in replacement created by Facebook
# If the image is sRGB, swap the existing heavy profile for TinyRGB
# Only works if icc_profile is configured to be preserved in
# EXIF_FIELDS_TO_KEEP
if (self.tinyrgb_path):
output = subprocess.check_output([
self.exiftool_path,
'-DeviceModelDesc',
'-S',
'-T',
input_file
])
logger.debug("[EXIFTOOL] exiftool output: " + output)
if (output.rstrip().lower() == self.tinyrgb_icc_replace.lower()):
new_icc = 'icc_profile<=%s' % (
self.tinyrgb_path
)
exif_fields = [
new_icc if i == 'icc_profile' else i for i in exif_fields
]
# Strip all EXIF fields except the ones we want to
# explicitly copy over
command = [
self.exiftool_path,
input_file,
'-all=',
'-tagsFromFile',
'@'
]
command += ['-{0}'.format(i) for i in exif_fields]
command += [
'-m',
'-o',
'-'
]
output = open(output_file, 'w')
subprocess.call(command, stdout=output)
| optimize | identifier_name |
__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
# Copyright (c) 2015 Wikimedia Foundation
# EXIF optimizer, aims to reduce thumbnail weight as much as possible
# while retaining some critical metadata
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger
class Optimizer(BaseOptimizer):
def __init__(self, context):
super(Optimizer, self).__init__(context)
self.runnable = True
self.exiftool_path = self.context.config.EXIFTOOL_PATH
self.exif_fields_to_keep = self.context.config.EXIF_FIELDS_TO_KEEP
self.tinyrgb_path = self.context.config.EXIF_TINYRGB_PATH
self.tinyrgb_icc_replace = self.context.config.EXIF_TINYRGB_ICC_REPLACE
if not (os.path.isfile(self.exiftool_path)
and os.access(self.exiftool_path, os.X_OK)):
logger.error(
"ERROR exiftool path '{0}' is not accessible"
.format(self.exiftool_path)
)
self.runnable = False
if not (os.path.isfile(self.tinyrgb_path)
and os.access(self.tinyrgb_path, os.R_OK)):
logger.error(
"ERROR tinyrgb path '{0}' is not accessible"
.format(self.tinyrgb_path)
)
self.tinyrgb_path = False
def should_run(self, image_extension, buffer):
good_extension = 'jpg' in image_extension or 'jpeg' in image_extension
return good_extension and self.runnable
def optimize(self, buffer, input_file, output_file):
exif_fields = self.exif_fields_to_keep
# TinyRGB is a lightweight sRGB swap-in replacement created by Facebook
# If the image is sRGB, swap the existing heavy profile for TinyRGB
# Only works if icc_profile is configured to be preserved in
# EXIF_FIELDS_TO_KEEP
if (self.tinyrgb_path):
output = subprocess.check_output([
self.exiftool_path,
'-DeviceModelDesc',
'-S',
'-T',
input_file
])
logger.debug("[EXIFTOOL] exiftool output: " + output)
if (output.rstrip().lower() == self.tinyrgb_icc_replace.lower()):
|
# Strip all EXIF fields except the ones we want to
# explicitly copy over
command = [
self.exiftool_path,
input_file,
'-all=',
'-tagsFromFile',
'@'
]
command += ['-{0}'.format(i) for i in exif_fields]
command += [
'-m',
'-o',
'-'
]
output = open(output_file, 'w')
subprocess.call(command, stdout=output)
| new_icc = 'icc_profile<=%s' % (
self.tinyrgb_path
)
exif_fields = [
new_icc if i == 'icc_profile' else i for i in exif_fields
] | conditional_block |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This crate provides the `regex!` macro. Its use is documented in the
//! `regex` crate.
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(plugin_registrar, quote, rustc_private)]
extern crate regex;
extern crate regex_syntax;
extern crate rustc_plugin;
extern crate syntax;
use std::collections::BTreeMap;
use std::usize;
use syntax::ast;
use syntax::codemap;
use syntax::ext::build::AstBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::parse::token;
use syntax::print::pprust;
use syntax::fold::Folder;
use syntax::ptr::P;
use rustc_plugin::Registry;
use regex::internal::{Compiler, EmptyLook, Inst, Program};
use regex_syntax::Expr;
/// For the `regex!` syntax extension. Do not use.
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("regex", native);
}
/// Generates specialized code for the Pike VM for a particular regular
/// expression.
///
/// There are two primary differences between the code generated here and the
/// general code in vm.rs.
///
/// 1. All heap allocation is removed. Sized vector types are used instead.
/// Care must be taken to make sure that these vectors are not copied
/// gratuitously. (If you're not sure, run the benchmarks. They will yell
/// at you if you do.)
/// 2. The main `match instruction { ... }` expressions are replaced with more
/// direct `match pc { ... }`. The generators can be found in
/// `step_insts` and `add_insts`.
///
/// It is strongly recommended to read the dynamic implementation in vm.rs
/// before trying to understand the code generator. The implementation
/// strategy is identical and vm.rs has comments and will be easier to follow.
fn native(cx: &mut ExtCtxt, sp: codemap::Span, tts: &[ast::TokenTree])
-> Box<MacResult+'static> {
let regex = match parse(cx, tts) {
Some(r) => r,
// error is logged in 'parse' with cx.span_err
None => return DummyResult::any(sp),
};
// We use the largest possible size limit because this is happening at
// compile time. We trust the programmer.
let expr = match Expr::parse(®ex) {
Ok(expr) => expr,
Err(err) => {
cx.span_err(sp, &err.to_string());
return DummyResult::any(sp)
}
};
let prog = match Compiler::new().size_limit(usize::MAX).compile(&[expr]) {
Ok(re) => re,
Err(err) => {
cx.span_err(sp, &err.to_string());
return DummyResult::any(sp)
}
};
let names = prog.captures.iter().cloned().collect();
let mut gen = NfaGen {
cx: &*cx,
sp: sp,
prog: prog,
names: names,
original: regex,
};
MacEager::expr(gen.code())
}
struct NfaGen<'a> {
cx: &'a ExtCtxt<'a>,
sp: codemap::Span,
prog: Program,
names: Vec<Option<String>>,
original: String,
}
impl<'a> NfaGen<'a> {
fn code(&mut self) -> P<ast::Expr> {
// Most or all of the following things are used in the quasiquoted
// expression returned.
let num_cap_locs = 2 * self.prog.captures.len();
let num_insts = self.prog.len();
let cap_names = self.vec_expr(self.names.iter(),
&mut |cx, name| match *name {
Some(ref name) => {
let name = &**name;
quote_expr!(cx, Some($name))
}
None => cx.expr_none(self.sp),
}
);
let capture_name_idx = {
let mut capture_name_idx = BTreeMap::new();
for (i, name) in self.names.iter().enumerate() {
if let Some(ref name) = *name {
capture_name_idx.insert(name.to_owned(), i);
}
}
self.vec_expr(capture_name_idx.iter(),
&mut |cx, (name, group_idx)|
quote_expr!(cx, ($name, $group_idx))
)
};
let is_anchored_start = self.prog.is_anchored_start;
let step_insts = self.step_insts();
let add_insts = self.add_insts();
let regex = &*self.original;
quote_expr!(self.cx, {
// When `regex!` is bound to a name that is not used, we have to make sure
// that dead_code warnings don't bubble up to the user from the generated
// code. Therefore, we suppress them by allowing dead_code. The effect is that
// the user is only warned about *their* unused variable/code, and not the
// unused code generated by regex!. See #14185 for an example.
#[allow(dead_code)]
static CAPTURES: &'static [Option<&'static str>] = &$cap_names;
#[allow(dead_code)]
static CAPTURE_NAME_IDX: &'static [(&'static str, usize)] = &$capture_name_idx;
#[allow(dead_code)]
fn exec<'t>(
mut caps: &mut [Option<usize>],
input: &'t str,
start: usize,
) -> bool {
#![allow(unused_imports)]
#![allow(unused_mut)]
use regex::internal::{Char, CharInput, InputAt, Input, Inst};
let input = CharInput::new(input.as_bytes());
let at = input.at(start);
return Nfa {
input: input,
ncaps: caps.len(),
}.exec(&mut NfaThreads::new(), &mut caps, at);
struct Nfa<'t> {
input: CharInput<'t>,
ncaps: usize,
}
impl<'t> Nfa<'t> {
#[allow(unused_variables)]
fn exec(
&mut self,
mut q: &mut NfaThreads,
mut caps: &mut [Option<usize>],
mut at: InputAt,
) -> bool {
let mut matched = false;
let (mut clist, mut nlist) = (&mut q.clist, &mut q.nlist);
clist.empty(); nlist.empty();
'LOOP: loop {
if clist.size == 0 {
if matched || (!at.is_start() && $is_anchored_start) {
break;
}
// TODO: Prefix matching... Hmm.
// Prefix matching now uses a DFA, so I think this is
// going to require encoding that DFA statically.
}
if clist.size == 0 || (!$is_anchored_start && !matched) {
self.add(clist, &mut caps, 0, at);
}
let at_next = self.input.at(at.next_pos());
for i in 0..clist.size {
let pc = clist.pc(i);
let tcaps = clist.caps(i);
if self.step(nlist, caps, tcaps, pc, at, at_next) {
matched = true;
if caps.len() == 0 {
break 'LOOP;
}
break;
}
}
if at.char().is_none() {
break;
}
at = at_next;
::std::mem::swap(&mut clist, &mut nlist);
nlist.empty();
}
matched
}
// Sometimes `nlist` is never used (for empty regexes).
#[allow(unused_variables)]
#[inline]
fn step(
&self,
nlist: &mut Threads,
caps: &mut [Option<usize>],
thread_caps: &mut [Option<usize>],
pc: usize,
at: InputAt,
at_next: InputAt,
) -> bool {
$step_insts;
false
}
fn add(
&self,
nlist: &mut Threads,
thread_caps: &mut [Option<usize>],
pc: usize,
at: InputAt,
) {
if nlist.contains(pc) {
return;
}
let ti = nlist.add(pc);
$add_insts
}
}
struct NfaThreads {
clist: Threads,
nlist: Threads,
}
struct Threads {
dense: [Thread; $num_insts],
sparse: [usize; $num_insts],
size: usize,
}
struct Thread {
pc: usize,
caps: [Option<usize>; $num_cap_locs],
}
impl NfaThreads {
fn new() -> NfaThreads {
NfaThreads {
clist: Threads::new(),
nlist: Threads::new(),
}
}
fn swap(&mut self) {
::std::mem::swap(&mut self.clist, &mut self.nlist);
}
}
impl Threads {
fn new() -> Threads {
Threads {
// These unsafe blocks are used for performance reasons, as they
// give us a zero-cost initialization of a sparse set. The
// trick is described in more detail here:
// http://research.swtch.com/sparse
// The idea here is to avoid initializing threads that never
// need to be initialized, particularly for larger regexes with
// a lot of instructions.
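// A comment-only walk-through: starting from garbage, after add(5) and
// add(9) we have dense = [T(5), T(9), ..junk], sparse[5] = 0,
// sparse[9] = 1, size = 2. contains(7) reads sparse[7] — possibly junk
// g — and still answers correctly, since either g >= size or
// dense[g].pc != 7. Neither array ever needs zeroing.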
dense: unsafe { ::std::mem::uninitialized() },
sparse: unsafe { ::std::mem::uninitialized() },
size: 0,
}
}
#[inline]
fn add(&mut self, pc: usize) -> usize {
let i = self.size;
self.dense[i].pc = pc;
self.sparse[pc] = i;
self.size += 1;
i
}
#[inline]
fn thread(&mut self, i: usize) -> &mut Thread {
&mut self.dense[i]
}
#[inline]
fn contains(&self, pc: usize) -> bool {
let s = unsafe { ::std::ptr::read_volatile(&self.sparse[pc]) };
s < self.size && self.dense[s].pc == pc
}
#[inline]
fn empty(&mut self) {
self.size = 0;
}
#[inline]
fn pc(&self, i: usize) -> usize {
self.dense[i].pc
}
#[inline]
fn caps<'r>(&'r mut self, i: usize) -> &'r mut [Option<usize>] {
&mut self.dense[i].caps
}
}
}
::regex::Regex(::regex::internal::_Regex::Plugin(::regex::internal::Plugin {
original: $regex,
names: &CAPTURES,
groups: &CAPTURE_NAME_IDX,
prog: exec,
}))
})
}
// Generates code for the `add` method, which is responsible for adding
// zero-width states to the next queue of states to visit.
fn add_insts(&self) -> P<ast::Expr> {
let arms = self.prog.iter().enumerate().map(|(pc, inst)| {
let body = match *inst {
Inst::EmptyLook(ref inst) => {
let nextpc = inst.goto;
match inst.look {
EmptyLook::StartLine => {
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
if prev.is_none() || prev == '\n' {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::EndLine => {
quote_expr!(self.cx, {
if at.char().is_none() || at.char() == '\n' {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::StartText => {
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
if prev.is_none() {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::EndText => {
quote_expr!(self.cx, {
if at.char().is_none() {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::WordBoundary
| EmptyLook::NotWordBoundary => {
let m = if inst.look == EmptyLook::WordBoundary {
quote_expr!(self.cx, { w1 ^ w2 })
} else {
quote_expr!(self.cx, { !(w1 ^ w2) })
};
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
let w1 = prev.is_word_char();
let w2 = at.char().is_word_char();
if $m {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::WordBoundaryAscii
| EmptyLook::NotWordBoundaryAscii => {
unreachable!()
}
}
}
Inst::Save(ref inst) => {
let nextpc = inst.goto;
let slot = inst.slot;
quote_expr!(self.cx, {
if $slot >= self.ncaps {
self.add(nlist, thread_caps, $nextpc, at);
} else {
let old = thread_caps[$slot];
thread_caps[$slot] = Some(at.pos());
self.add(nlist, thread_caps, $nextpc, at);
thread_caps[$slot] = old;
}
})
}
Inst::Split(ref inst) => {
let (x, y) = (inst.goto1, inst.goto2);
quote_expr!(self.cx, {
self.add(nlist, thread_caps, $x, at);
self.add(nlist, thread_caps, $y, at);
})
}
// For Match, Char, Ranges
_ => quote_expr!(self.cx, {
let mut t = &mut nlist.thread(ti);
for (slot, val) in t.caps.iter_mut().zip(thread_caps.iter()) {
*slot = *val;
}
}),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Generates the code for the `step` method, which processes all states
// in the current queue that consume a single character.
fn step_insts(&self) -> P<ast::Expr> {
let arms = self.prog.iter().enumerate().map(|(pc, inst)| {
let body = match *inst {
Inst::Match(_) => quote_expr!(self.cx, {
for (slot, val) in caps.iter_mut().zip(thread_caps.iter()) {
*slot = *val;
}
return true;
}),
Inst::Char(ref inst) => {
let nextpc = inst.goto;
let c = inst.c;
quote_expr!(self.cx, {
if $c == at.char() {
self.add(nlist, thread_caps, $nextpc, at_next);
}
return false;
})
}
Inst::Ranges(ref inst) => {
let match_class = self.match_class(&inst.ranges);
let nextpc = inst.goto;
quote_expr!(self.cx, {
let mut c = at.char();
if let Some(c) = c.as_char() {
if $match_class {
self.add(nlist, thread_caps, $nextpc, at_next);
}
}
return false;
})
}
// EmptyLook, Save, Jump, Split
_ => quote_expr!(self.cx, { return false; }),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Translates a character class into a match expression.
// This avoids a binary search (and is hopefully replaced by a jump
// table).
fn match_class(&self, ranges: &[(char, char)]) -> P<ast::Expr> {
let mut arms = ranges.iter().map(|&(start, end)| {
let pat = self.cx.pat(
self.sp, ast::PatKind::Range(
quote_expr!(self.cx, $start), quote_expr!(self.cx, $end)));
self.cx.arm(self.sp, vec!(pat), quote_expr!(self.cx, true))
}).collect::<Vec<ast::Arm>>();
arms.push(self.wild_arm_expr(quote_expr!(self.cx, false)));
let match_on = quote_expr!(self.cx, c);
self.cx.expr_match(self.sp, match_on, arms)
}
// Generates code for checking a literal prefix of the search string.
// The code is only generated if the regex *has* a literal prefix.
// Otherwise, a no-op is returned.
// fn check_prefix(&self) -> P<ast::Expr> {
// if self.prog.prefixes.len() == 0 {
// self.empty_block()
// } else {
// quote_expr!(self.cx,
// if clist.size == 0 {
// let haystack = &self.input.as_bytes()[self.ic..];
// match find_prefix(prefix_bytes, haystack) {
// None => break,
// Some(i) => {
// self.ic += i;
// next_ic = self.chars.set(self.ic);
// }
// }
// }
// )
// }
// }
// Builds a `match pc { ... }` expression from a list of arms, specifically
// for matching the current program counter with an instruction.
// A wild-card arm is automatically added that executes a no-op. It will
// never be used, but is added to satisfy the compiler complaining about
// non-exhaustive patterns.
fn match_insts(&self, mut arms: Vec<ast::Arm>) -> P<ast::Expr> |
fn empty_block(&self) -> P<ast::Expr> {
quote_expr!(self.cx, {})
}
// Creates a match arm for the instruction at `pc` with the expression
// `body`.
fn arm_inst(&self, pc: usize, body: P<ast::Expr>) -> ast::Arm {
let pc_pat = self.cx.pat_lit(self.sp, quote_expr!(self.cx, $pc));
self.cx.arm(self.sp, vec!(pc_pat), body)
}
// Creates a wild-card match arm with the expression `body`.
fn wild_arm_expr(&self, body: P<ast::Expr>) -> ast::Arm {
ast::Arm {
attrs: vec!(),
pats: vec!(P(ast::Pat{
id: ast::DUMMY_NODE_ID,
span: self.sp,
node: ast::PatKind::Wild,
})),
guard: None,
body: body,
}
}
// Converts `xs` to a `[x1, x2, .., xN]` expression by calling `to_expr`
// on each element in `xs`.
fn vec_expr<T, It: Iterator<Item=T>>(
&self,
xs: It,
to_expr: &mut FnMut(&ExtCtxt, T) -> P<ast::Expr>,
) -> P<ast::Expr> {
let exprs = xs.map(|x| to_expr(self.cx, x)).collect();
self.cx.expr_vec(self.sp, exprs)
}
}
/// Looks for a single string literal and returns it.
/// Otherwise, logs an error with cx.span_err and returns None.
fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option<String> {
let mut parser = cx.new_parser_from_tts(tts);
if let Ok(expr) = parser.parse_expr() {
let entry = cx.expander().fold_expr(expr);
let regex = match entry.node {
ast::ExprKind::Lit(ref lit) => {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => {
cx.span_err(entry.span, &format!(
"expected string literal but got `{}`",
pprust::lit_to_string(&**lit)));
return None
}
}
}
_ => {
cx.span_err(entry.span, &format!(
"expected string literal but got `{}`",
pprust::expr_to_string(&*entry)));
return None
}
};
if !parser.eat(&token::Eof) {
cx.span_err(parser.span, "only one string literal allowed");
return None;
}
Some(regex)
} else {
cx.parse_sess().span_diagnostic.err("failure parsing token tree");
None
}
}
| {
arms.push(self.wild_arm_expr(self.empty_block()));
self.cx.expr_match(self.sp, quote_expr!(self.cx, pc), arms)
} | identifier_body |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This crate provides the `regex!` macro. Its use is documented in the
//! `regex` crate.
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(plugin_registrar, quote, rustc_private)]
extern crate regex;
extern crate regex_syntax;
extern crate rustc_plugin;
extern crate syntax;
use std::collections::BTreeMap;
use std::usize;
use syntax::ast;
use syntax::codemap;
use syntax::ext::build::AstBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::parse::token;
use syntax::print::pprust;
use syntax::fold::Folder;
use syntax::ptr::P;
use rustc_plugin::Registry;
use regex::internal::{Compiler, EmptyLook, Inst, Program};
use regex_syntax::Expr;
/// For the `regex!` syntax extension. Do not use.
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("regex", native);
}
/// Generates specialized code for the Pike VM for a particular regular
/// expression.
///
/// There are two primary differences between the code generated here and the
/// general code in vm.rs.
///
/// 1. All heap allocation is removed. Sized vector types are used instead.
/// Care must be taken to make sure that these vectors are not copied
/// gratuitously. (If you're not sure, run the benchmarks. They will yell
/// at you if you do.)
/// 2. The main `match instruction { ... }` expressions are replaced with more
/// direct `match pc { ... }`. The generators can be found in
/// `step_insts` and `add_insts`.
///
/// It is strongly recommended to read the dynamic implementation in vm.rs
/// before trying to understand the code generator. The implementation
/// strategy is identical and vm.rs has comments and will be easier to follow.
fn native(cx: &mut ExtCtxt, sp: codemap::Span, tts: &[ast::TokenTree])
-> Box<MacResult+'static> {
let regex = match parse(cx, tts) {
Some(r) => r,
// error is logged in 'parse' with cx.span_err
None => return DummyResult::any(sp),
};
// We use the largest possible size limit because this is happening at
// compile time. We trust the programmer.
let expr = match Expr::parse(®ex) {
Ok(expr) => expr,
Err(err) => {
cx.span_err(sp, &err.to_string());
return DummyResult::any(sp)
}
};
let prog = match Compiler::new().size_limit(usize::MAX).compile(&[expr]) {
Ok(re) => re,
Err(err) => {
cx.span_err(sp, &err.to_string());
return DummyResult::any(sp)
}
};
let names = prog.captures.iter().cloned().collect();
let mut gen = NfaGen {
cx: &*cx,
sp: sp,
prog: prog,
names: names,
original: regex,
};
MacEager::expr(gen.code())
}
struct NfaGen<'a> {
cx: &'a ExtCtxt<'a>,
sp: codemap::Span,
prog: Program,
names: Vec<Option<String>>,
original: String,
}
impl<'a> NfaGen<'a> {
fn code(&mut self) -> P<ast::Expr> {
// Most or all of the following things are used in the quasiquoted
// expression returned.
let num_cap_locs = 2 * self.prog.captures.len();
let num_insts = self.prog.len();
let cap_names = self.vec_expr(self.names.iter(),
&mut |cx, name| match *name {
Some(ref name) => {
let name = &**name;
quote_expr!(cx, Some($name))
}
None => cx.expr_none(self.sp),
}
);
let capture_name_idx = {
let mut capture_name_idx = BTreeMap::new();
for (i, name) in self.names.iter().enumerate() {
if let Some(ref name) = *name {
capture_name_idx.insert(name.to_owned(), i);
}
}
self.vec_expr(capture_name_idx.iter(),
&mut |cx, (name, group_idx)|
quote_expr!(cx, ($name, $group_idx))
)
};
let is_anchored_start = self.prog.is_anchored_start;
let step_insts = self.step_insts();
let add_insts = self.add_insts();
let regex = &*self.original;
quote_expr!(self.cx, {
// When `regex!` is bound to a name that is not used, we have to make sure
// that dead_code warnings don't bubble up to the user from the generated
// code. Therefore, we suppress them by allowing dead_code. The effect is that
// the user is only warned about *their* unused variable/code, and not the
// unused code generated by regex!. See #14185 for an example.
#[allow(dead_code)]
static CAPTURES: &'static [Option<&'static str>] = &$cap_names;
#[allow(dead_code)]
static CAPTURE_NAME_IDX: &'static [(&'static str, usize)] = &$capture_name_idx;
#[allow(dead_code)]
fn exec<'t>(
mut caps: &mut [Option<usize>],
input: &'t str,
start: usize,
) -> bool {
#![allow(unused_imports)]
#![allow(unused_mut)]
use regex::internal::{Char, CharInput, InputAt, Input, Inst};
let input = CharInput::new(input.as_bytes());
let at = input.at(start);
return Nfa {
input: input,
ncaps: caps.len(),
}.exec(&mut NfaThreads::new(), &mut caps, at);
struct Nfa<'t> {
input: CharInput<'t>,
ncaps: usize,
}
impl<'t> Nfa<'t> {
#[allow(unused_variables)]
fn exec(
&mut self,
mut q: &mut NfaThreads,
mut caps: &mut [Option<usize>],
mut at: InputAt,
) -> bool {
let mut matched = false;
let (mut clist, mut nlist) = (&mut q.clist, &mut q.nlist);
clist.empty(); nlist.empty();
'LOOP: loop {
if clist.size == 0 {
if matched || (!at.is_start() && $is_anchored_start) {
break;
}
// TODO: Prefix matching... Hmm.
// Prefix matching now uses a DFA, so I think this is
// going to require encoding that DFA statically.
}
if clist.size == 0 || (!$is_anchored_start && !matched) {
self.add(clist, &mut caps, 0, at);
}
let at_next = self.input.at(at.next_pos());
for i in 0..clist.size {
let pc = clist.pc(i);
let tcaps = clist.caps(i);
if self.step(nlist, caps, tcaps, pc, at, at_next) {
matched = true;
if caps.len() == 0 {
break 'LOOP;
}
break;
}
}
if at.char().is_none() {
break;
}
at = at_next;
::std::mem::swap(&mut clist, &mut nlist);
nlist.empty();
}
matched
}
// Sometimes `nlist` is never used (for empty regexes).
#[allow(unused_variables)]
#[inline]
fn step(
&self,
nlist: &mut Threads,
caps: &mut [Option<usize>],
thread_caps: &mut [Option<usize>],
pc: usize,
at: InputAt,
at_next: InputAt,
) -> bool {
$step_insts;
false
}
fn add(
&self,
nlist: &mut Threads,
thread_caps: &mut [Option<usize>],
pc: usize,
at: InputAt,
) {
if nlist.contains(pc) {
return;
}
let ti = nlist.add(pc);
$add_insts
}
}
struct NfaThreads {
clist: Threads,
nlist: Threads,
}
struct Threads {
dense: [Thread; $num_insts],
sparse: [usize; $num_insts],
size: usize,
}
struct Thread {
pc: usize,
caps: [Option<usize>; $num_cap_locs],
}
impl NfaThreads {
fn new() -> NfaThreads {
NfaThreads {
clist: Threads::new(),
nlist: Threads::new(),
}
}
fn swap(&mut self) {
::std::mem::swap(&mut self.clist, &mut self.nlist);
}
}
impl Threads {
fn new() -> Threads {
Threads {
// These unsafe blocks are used for performance reasons, as they
// give us a zero-cost initialization of a sparse set. The
// trick is described in more detail here:
// http://research.swtch.com/sparse
// The idea here is to avoid initializing threads that never
// need to be initialized, particularly for larger regexes with
// a lot of instructions.
dense: unsafe { ::std::mem::uninitialized() },
sparse: unsafe { ::std::mem::uninitialized() },
size: 0,
}
}
#[inline]
fn add(&mut self, pc: usize) -> usize {
let i = self.size;
self.dense[i].pc = pc;
self.sparse[pc] = i;
self.size += 1;
i
}
#[inline]
fn thread(&mut self, i: usize) -> &mut Thread {
&mut self.dense[i]
}
#[inline]
fn contains(&self, pc: usize) -> bool {
let s = unsafe { ::std::ptr::read_volatile(&self.sparse[pc]) };
s < self.size && self.dense[s].pc == pc
}
#[inline]
fn empty(&mut self) {
self.size = 0;
}
#[inline]
fn pc(&self, i: usize) -> usize {
self.dense[i].pc
}
#[inline]
fn caps<'r>(&'r mut self, i: usize) -> &'r mut [Option<usize>] {
&mut self.dense[i].caps
}
}
}
::regex::Regex(::regex::internal::_Regex::Plugin(::regex::internal::Plugin {
original: $regex,
names: &CAPTURES,
groups: &CAPTURE_NAME_IDX,
prog: exec,
}))
})
}
// Generates code for the `add` method, which is responsible for adding
// zero-width states to the next queue of states to visit.
fn add_insts(&self) -> P<ast::Expr> {
let arms = self.prog.iter().enumerate().map(|(pc, inst)| {
let body = match *inst {
Inst::EmptyLook(ref inst) => {
let nextpc = inst.goto;
match inst.look {
EmptyLook::StartLine => {
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
if prev.is_none() || prev == '\n' {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::EndLine => {
quote_expr!(self.cx, {
if at.char().is_none() || at.char() == '\n' {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::StartText => {
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
if prev.is_none() {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::EndText => {
quote_expr!(self.cx, {
if at.char().is_none() {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::WordBoundary
| EmptyLook::NotWordBoundary => {
let m = if inst.look == EmptyLook::WordBoundary {
quote_expr!(self.cx, { w1 ^ w2 })
} else {
quote_expr!(self.cx, { !(w1 ^ w2) })
};
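// `w1 ^ w2` is true precisely when exactly one of the two characters
// flanking the position is a word character — the definition of a word
// boundary; the negated form implements the not-a-boundary assertion.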
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
let w1 = prev.is_word_char();
let w2 = at.char().is_word_char();
if $m {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::WordBoundaryAscii
| EmptyLook::NotWordBoundaryAscii => {
unreachable!()
}
}
}
Inst::Save(ref inst) => {
let nextpc = inst.goto;
let slot = inst.slot;
quote_expr!(self.cx, {
if $slot >= self.ncaps {
self.add(nlist, thread_caps, $nextpc, at);
} else {
let old = thread_caps[$slot];
thread_caps[$slot] = Some(at.pos());
self.add(nlist, thread_caps, $nextpc, at);
thread_caps[$slot] = old;
}
})
}
Inst::Split(ref inst) => {
let (x, y) = (inst.goto1, inst.goto2);
quote_expr!(self.cx, {
self.add(nlist, thread_caps, $x, at);
self.add(nlist, thread_caps, $y, at);
})
}
// For Match, Char, Ranges
_ => quote_expr!(self.cx, {
let mut t = &mut nlist.thread(ti);
for (slot, val) in t.caps.iter_mut().zip(thread_caps.iter()) {
*slot = *val;
}
}),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Generates the code for the `step` method, which processes all states
// in the current queue that consume a single character.
fn step_insts(&self) -> P<ast::Expr> {
let arms = self.prog.iter().enumerate().map(|(pc, inst)| {
let body = match *inst {
Inst::Match(_) => quote_expr!(self.cx, {
for (slot, val) in caps.iter_mut().zip(thread_caps.iter()) {
*slot = *val;
}
return true;
}),
Inst::Char(ref inst) => {
let nextpc = inst.goto;
let c = inst.c;
quote_expr!(self.cx, {
if $c == at.char() {
self.add(nlist, thread_caps, $nextpc, at_next);
}
return false;
})
}
Inst::Ranges(ref inst) => {
let match_class = self.match_class(&inst.ranges);
let nextpc = inst.goto;
quote_expr!(self.cx, {
let mut c = at.char();
if let Some(c) = c.as_char() {
if $match_class {
self.add(nlist, thread_caps, $nextpc, at_next);
}
}
return false;
})
}
// EmptyLook, Save, Jump, Split
_ => quote_expr!(self.cx, { return false; }),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Translates a character class into a match expression.
// This avoids a binary search (and is hopefully replaced by a jump
// table).
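// For example, the ranges [('a','c'), ('x','z')] expand to roughly
// `match c { 'a'...'c' => true, 'x'...'z' => true, _ => false }`
// (using this era's inclusive `...` range-pattern syntax).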
fn | (&self, ranges: &[(char, char)]) -> P<ast::Expr> {
let mut arms = ranges.iter().map(|&(start, end)| {
let pat = self.cx.pat(
self.sp, ast::PatKind::Range(
quote_expr!(self.cx, $start), quote_expr!(self.cx, $end)));
self.cx.arm(self.sp, vec!(pat), quote_expr!(self.cx, true))
}).collect::<Vec<ast::Arm>>();
arms.push(self.wild_arm_expr(quote_expr!(self.cx, false)));
let match_on = quote_expr!(self.cx, c);
self.cx.expr_match(self.sp, match_on, arms)
}
// Generates code for checking a literal prefix of the search string.
// The code is only generated if the regex *has* a literal prefix.
// Otherwise, a no-op is returned.
// fn check_prefix(&self) -> P<ast::Expr> {
// if self.prog.prefixes.len() == 0 {
// self.empty_block()
// } else {
// quote_expr!(self.cx,
// if clist.size == 0 {
// let haystack = &self.input.as_bytes()[self.ic..];
// match find_prefix(prefix_bytes, haystack) {
// None => break,
// Some(i) => {
// self.ic += i;
// next_ic = self.chars.set(self.ic);
// }
// }
// }
// )
// }
// }
// Builds a `match pc { ... }` expression from a list of arms, specifically
// for matching the current program counter with an instruction.
// A wild-card arm is automatically added that executes a no-op. It will
// never be used, but is added to satisfy the compiler complaining about
// non-exhaustive patterns.
fn match_insts(&self, mut arms: Vec<ast::Arm>) -> P<ast::Expr> {
arms.push(self.wild_arm_expr(self.empty_block()));
self.cx.expr_match(self.sp, quote_expr!(self.cx, pc), arms)
}
fn empty_block(&self) -> P<ast::Expr> {
quote_expr!(self.cx, {})
}
// Creates a match arm for the instruction at `pc` with the expression
// `body`.
fn arm_inst(&self, pc: usize, body: P<ast::Expr>) -> ast::Arm {
let pc_pat = self.cx.pat_lit(self.sp, quote_expr!(self.cx, $pc));
self.cx.arm(self.sp, vec!(pc_pat), body)
}
// Creates a wild-card match arm with the expression `body`.
fn wild_arm_expr(&self, body: P<ast::Expr>) -> ast::Arm {
ast::Arm {
attrs: vec!(),
pats: vec!(P(ast::Pat{
id: ast::DUMMY_NODE_ID,
span: self.sp,
node: ast::PatKind::Wild,
})),
guard: None,
body: body,
}
}
// Converts `xs` to a `[x1, x2, .., xN]` expression by calling `to_expr`
// on each element in `xs`.
fn vec_expr<T, It: Iterator<Item=T>>(
&self,
xs: It,
to_expr: &mut FnMut(&ExtCtxt, T) -> P<ast::Expr>,
) -> P<ast::Expr> {
let exprs = xs.map(|x| to_expr(self.cx, x)).collect();
self.cx.expr_vec(self.sp, exprs)
}
}
/// Looks for a single string literal and returns it.
/// Otherwise, logs an error with cx.span_err and returns None.
fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option<String> {
let mut parser = cx.new_parser_from_tts(tts);
if let Ok(expr) = parser.parse_expr() {
let entry = cx.expander().fold_expr(expr);
let regex = match entry.node {
ast::ExprKind::Lit(ref lit) => {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => {
cx.span_err(entry.span, &format!(
"expected string literal but got `{}`",
pprust::lit_to_string(&**lit)));
return None
}
}
}
_ => {
cx.span_err(entry.span, &format!(
"expected string literal but got `{}`",
pprust::expr_to_string(&*entry)));
return None
}
};
if !parser.eat(&token::Eof) {
cx.span_err(parser.span, "only one string literal allowed");
return None;
}
Some(regex)
} else {
cx.parse_sess().span_diagnostic.err("failure parsing token tree");
None
}
}
| match_class | identifier_name |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This crate provides the `regex!` macro. Its use is documented in the
//! `regex` crate.
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(plugin_registrar, quote, rustc_private)]
extern crate regex;
extern crate regex_syntax;
extern crate rustc_plugin;
extern crate syntax;
use std::collections::BTreeMap;
use std::usize;
use syntax::ast;
use syntax::codemap;
use syntax::ext::build::AstBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::parse::token;
use syntax::print::pprust;
use syntax::fold::Folder;
use syntax::ptr::P;
use rustc_plugin::Registry;
use regex::internal::{Compiler, EmptyLook, Inst, Program};
use regex_syntax::Expr;
/// For the `regex!` syntax extension. Do not use.
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("regex", native);
}
/// Generates specialized code for the Pike VM for a particular regular
/// expression.
///
/// There are two primary differences between the code generated here and the
/// general code in vm.rs.
///
/// 1. All heap allocation is removed. Sized vector types are used instead.
/// Care must be taken to make sure that these vectors are not copied
/// gratuitously. (If you're not sure, run the benchmarks. They will yell
/// at you if you do.)
/// 2. The main `match instruction { ... }` expressions are replaced with more
/// direct `match pc { ... }`. The generators can be found in
/// `step_insts` and `add_insts`.
///
/// It is strongly recommended to read the dynamic implementation in vm.rs
/// before trying to understand the code generator. The implementation
/// strategy is identical and vm.rs has comments and will be easier to follow.
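/// As a rough sketch of the output shape: `regex!("...")` expands to a
/// block that defines the statics `CAPTURES` and `CAPTURE_NAME_IDX` plus a
/// monomorphic `exec` function wrapping a fixed-size `Nfa`, and the block
/// evaluates to `Regex(_Regex::Plugin(Plugin { original, names, groups,
/// prog: exec }))`.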
fn native(cx: &mut ExtCtxt, sp: codemap::Span, tts: &[ast::TokenTree])
-> Box<MacResult+'static> {
let regex = match parse(cx, tts) {
Some(r) => r,
// error is logged in 'parse' with cx.span_err
None => return DummyResult::any(sp),
};
// We use the largest possible size limit because this is happening at
// compile time. We trust the programmer.
let expr = match Expr::parse(®ex) {
Ok(expr) => expr,
Err(err) => {
cx.span_err(sp, &err.to_string());
return DummyResult::any(sp)
}
};
let prog = match Compiler::new().size_limit(usize::MAX).compile(&[expr]) {
Ok(re) => re,
Err(err) => {
cx.span_err(sp, &err.to_string());
return DummyResult::any(sp)
}
};
let names = prog.captures.iter().cloned().collect();
let mut gen = NfaGen {
cx: &*cx,
sp: sp,
prog: prog,
names: names,
original: regex,
};
MacEager::expr(gen.code())
}
struct NfaGen<'a> {
cx: &'a ExtCtxt<'a>,
sp: codemap::Span,
prog: Program,
names: Vec<Option<String>>,
original: String,
}
impl<'a> NfaGen<'a> {
fn code(&mut self) -> P<ast::Expr> {
// Most or all of the following things are used in the quasiquoted
// expression returned.
let num_cap_locs = 2 * self.prog.captures.len();
let num_insts = self.prog.len();
let cap_names = self.vec_expr(self.names.iter(),
&mut |cx, name| match *name {
Some(ref name) => {
let name = &**name;
quote_expr!(cx, Some($name))
}
None => cx.expr_none(self.sp),
}
);
let capture_name_idx = {
let mut capture_name_idx = BTreeMap::new();
for (i, name) in self.names.iter().enumerate() {
if let Some(ref name) = *name {
capture_name_idx.insert(name.to_owned(), i);
}
}
self.vec_expr(capture_name_idx.iter(),
&mut |cx, (name, group_idx)|
quote_expr!(cx, ($name, $group_idx))
)
};
let is_anchored_start = self.prog.is_anchored_start;
let step_insts = self.step_insts();
let add_insts = self.add_insts();
let regex = &*self.original;
quote_expr!(self.cx, {
// When `regex!` is bound to a name that is not used, we have to make sure
// that dead_code warnings don't bubble up to the user from the generated
// code. Therefore, we suppress them by allowing dead_code. The effect is that
// the user is only warned about *their* unused variable/code, and not the
// unused code generated by regex!. See #14185 for an example.
#[allow(dead_code)]
static CAPTURES: &'static [Option<&'static str>] = &$cap_names;
#[allow(dead_code)]
static CAPTURE_NAME_IDX: &'static [(&'static str, usize)] = &$capture_name_idx;
#[allow(dead_code)]
fn exec<'t>(
mut caps: &mut [Option<usize>],
input: &'t str,
start: usize,
) -> bool {
#![allow(unused_imports)]
#![allow(unused_mut)]
use regex::internal::{Char, CharInput, InputAt, Input, Inst};
let input = CharInput::new(input.as_bytes());
let at = input.at(start);
return Nfa {
input: input,
ncaps: caps.len(),
}.exec(&mut NfaThreads::new(), &mut caps, at);
struct Nfa<'t> {
input: CharInput<'t>,
ncaps: usize,
}
impl<'t> Nfa<'t> {
#[allow(unused_variables)]
fn exec(
&mut self,
mut q: &mut NfaThreads,
mut caps: &mut [Option<usize>],
mut at: InputAt,
) -> bool {
let mut matched = false;
let (mut clist, mut nlist) = (&mut q.clist, &mut q.nlist);
clist.empty(); nlist.empty();
'LOOP: loop {
if clist.size == 0 {
if matched || (!at.is_start() && $is_anchored_start) {
break;
}
// TODO: Prefix matching... Hmm.
// Prefix matching now uses a DFA, so I think this is
// going to require encoding that DFA statically.
}
if clist.size == 0 || (!$is_anchored_start && !matched) {
self.add(clist, &mut caps, 0, at);
}
let at_next = self.input.at(at.next_pos());
for i in 0..clist.size {
let pc = clist.pc(i);
let tcaps = clist.caps(i);
if self.step(nlist, caps, tcaps, pc, at, at_next) {
matched = true;
if caps.len() == 0 {
break 'LOOP;
}
break;
}
}
if at.char().is_none() {
break;
}
at = at_next;
::std::mem::swap(&mut clist, &mut nlist);
nlist.empty();
}
matched
}
// Sometimes `nlist` is never used (for empty regexes).
#[allow(unused_variables)]
#[inline]
fn step(
&self,
nlist: &mut Threads,
caps: &mut [Option<usize>],
thread_caps: &mut [Option<usize>],
pc: usize,
at: InputAt,
at_next: InputAt,
) -> bool {
$step_insts;
false
}
fn add(
&self,
nlist: &mut Threads,
thread_caps: &mut [Option<usize>],
pc: usize,
at: InputAt,
) {
if nlist.contains(pc) {
return;
}
let ti = nlist.add(pc);
$add_insts
}
}
struct NfaThreads {
clist: Threads,
nlist: Threads,
}
struct Threads {
dense: [Thread; $num_insts],
sparse: [usize; $num_insts],
size: usize,
}
struct Thread {
pc: usize,
caps: [Option<usize>; $num_cap_locs],
}
impl NfaThreads {
fn new() -> NfaThreads {
NfaThreads {
clist: Threads::new(),
nlist: Threads::new(),
}
}
fn swap(&mut self) {
::std::mem::swap(&mut self.clist, &mut self.nlist);
}
}
impl Threads {
fn new() -> Threads {
Threads {
// These unsafe blocks are used for performance reasons, as they
// give us a zero-cost initialization of a sparse set. The
// trick is described in more detail here:
// http://research.swtch.com/sparse
// The idea here is to avoid initializing threads that never
// need to be initialized, particularly for larger regexes with
// a lot of instructions.
dense: unsafe { ::std::mem::uninitialized() },
sparse: unsafe { ::std::mem::uninitialized() },
size: 0,
}
}
#[inline]
fn add(&mut self, pc: usize) -> usize {
let i = self.size;
self.dense[i].pc = pc;
self.sparse[pc] = i;
self.size += 1;
i
}
#[inline]
fn thread(&mut self, i: usize) -> &mut Thread {
&mut self.dense[i]
}
#[inline]
fn contains(&self, pc: usize) -> bool {
let s = unsafe { ::std::ptr::read_volatile(&self.sparse[pc]) };
s < self.size && self.dense[s].pc == pc
}
#[inline]
fn empty(&mut self) {
self.size = 0;
}
#[inline]
fn pc(&self, i: usize) -> usize {
self.dense[i].pc
}
#[inline]
fn caps<'r>(&'r mut self, i: usize) -> &'r mut [Option<usize>] {
&mut self.dense[i].caps
}
}
}
::regex::Regex(::regex::internal::_Regex::Plugin(::regex::internal::Plugin {
original: $regex,
names: &CAPTURES,
groups: &CAPTURE_NAME_IDX,
prog: exec,
}))
})
}
// Generates code for the `add` method, which is responsible for adding
// zero-width states to the next queue of states to visit.
fn add_insts(&self) -> P<ast::Expr> {
let arms = self.prog.iter().enumerate().map(|(pc, inst)| {
let body = match *inst {
Inst::EmptyLook(ref inst) => {
let nextpc = inst.goto;
match inst.look {
EmptyLook::StartLine => {
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
if prev.is_none() || prev == '\n' {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::EndLine => {
quote_expr!(self.cx, {
if at.char().is_none() || at.char() == '\n' {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::StartText => {
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
if prev.is_none() {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::EndText => {
quote_expr!(self.cx, {
if at.char().is_none() {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::WordBoundary
| EmptyLook::NotWordBoundary => {
let m = if inst.look == EmptyLook::WordBoundary {
quote_expr!(self.cx, { w1 ^ w2 })
} else {
quote_expr!(self.cx, { !(w1 ^ w2) })
};
quote_expr!(self.cx, {
let prev = self.input.previous_char(at);
let w1 = prev.is_word_char();
let w2 = at.char().is_word_char();
if $m {
self.add(nlist, thread_caps, $nextpc, at);
}
})
}
EmptyLook::WordBoundaryAscii
| EmptyLook::NotWordBoundaryAscii => {
unreachable!()
}
}
}
Inst::Save(ref inst) => {
let nextpc = inst.goto;
let slot = inst.slot;
quote_expr!(self.cx, {
if $slot >= self.ncaps {
self.add(nlist, thread_caps, $nextpc, at);
} else {
let old = thread_caps[$slot];
thread_caps[$slot] = Some(at.pos());
self.add(nlist, thread_caps, $nextpc, at);
thread_caps[$slot] = old;
}
})
}
Inst::Split(ref inst) => {
let (x, y) = (inst.goto1, inst.goto2);
quote_expr!(self.cx, {
self.add(nlist, thread_caps, $x, at);
self.add(nlist, thread_caps, $y, at);
})
}
// For Match, Char, Ranges
_ => quote_expr!(self.cx, {
let mut t = &mut nlist.thread(ti);
for (slot, val) in t.caps.iter_mut().zip(thread_caps.iter()) {
*slot = *val;
}
}),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Generates the code for the `step` method, which processes all states
// in the current queue that consume a single character.
fn step_insts(&self) -> P<ast::Expr> {
let arms = self.prog.iter().enumerate().map(|(pc, inst)| {
let body = match *inst {
Inst::Match(_) => quote_expr!(self.cx, {
for (slot, val) in caps.iter_mut().zip(thread_caps.iter()) {
*slot = *val;
}
return true;
}),
Inst::Char(ref inst) => {
let nextpc = inst.goto;
let c = inst.c;
quote_expr!(self.cx, {
if $c == at.char() {
self.add(nlist, thread_caps, $nextpc, at_next);
}
return false;
})
}
Inst::Ranges(ref inst) => {
let match_class = self.match_class(&inst.ranges);
let nextpc = inst.goto;
quote_expr!(self.cx, {
let mut c = at.char();
if let Some(c) = c.as_char() {
if $match_class { | }
return false;
})
}
// EmptyLook, Save, Jump, Split
_ => quote_expr!(self.cx, { return false; }),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Translates a character class into a match expression.
// This avoids a binary search (and is hopefully replaced by a jump
// table).
fn match_class(&self, ranges: &[(char, char)]) -> P<ast::Expr> {
let mut arms = ranges.iter().map(|&(start, end)| {
let pat = self.cx.pat(
self.sp, ast::PatKind::Range(
quote_expr!(self.cx, $start), quote_expr!(self.cx, $end)));
self.cx.arm(self.sp, vec!(pat), quote_expr!(self.cx, true))
}).collect::<Vec<ast::Arm>>();
arms.push(self.wild_arm_expr(quote_expr!(self.cx, false)));
let match_on = quote_expr!(self.cx, c);
self.cx.expr_match(self.sp, match_on, arms)
}
// Generates code for checking a literal prefix of the search string.
// The code is only generated if the regex *has* a literal prefix.
// Otherwise, a no-op is returned.
// fn check_prefix(&self) -> P<ast::Expr> {
// if self.prog.prefixes.len() == 0 {
// self.empty_block()
// } else {
// quote_expr!(self.cx,
// if clist.size == 0 {
// let haystack = &self.input.as_bytes()[self.ic..];
// match find_prefix(prefix_bytes, haystack) {
// None => break,
// Some(i) => {
// self.ic += i;
// next_ic = self.chars.set(self.ic);
// }
// }
// }
// )
// }
// }
// Builds a `match pc { ... }` expression from a list of arms, specifically
// for matching the current program counter with an instruction.
// A wild-card arm is automatically added that executes a no-op. It will
// never be used, but is added to satisfy the compiler complaining about
// non-exhaustive patterns.
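// For a three-instruction program, the generated expression is roughly
// `match pc { 0 => { ... }, 1 => { ... }, 2 => { ... }, _ => {} }`, with
// the `_ => {}` arm supplied by wild_arm_expr/empty_block below.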
fn match_insts(&self, mut arms: Vec<ast::Arm>) -> P<ast::Expr> {
arms.push(self.wild_arm_expr(self.empty_block()));
self.cx.expr_match(self.sp, quote_expr!(self.cx, pc), arms)
}
fn empty_block(&self) -> P<ast::Expr> {
quote_expr!(self.cx, {})
}
// Creates a match arm for the instruction at `pc` with the expression
// `body`.
fn arm_inst(&self, pc: usize, body: P<ast::Expr>) -> ast::Arm {
let pc_pat = self.cx.pat_lit(self.sp, quote_expr!(self.cx, $pc));
self.cx.arm(self.sp, vec!(pc_pat), body)
}
// Creates a wild-card match arm with the expression `body`.
fn wild_arm_expr(&self, body: P<ast::Expr>) -> ast::Arm {
ast::Arm {
attrs: vec!(),
pats: vec!(P(ast::Pat{
id: ast::DUMMY_NODE_ID,
span: self.sp,
node: ast::PatKind::Wild,
})),
guard: None,
body: body,
}
}
// Converts `xs` to a `[x1, x2, .., xN]` expression by calling `to_expr`
// on each element in `xs`.
fn vec_expr<T, It: Iterator<Item=T>>(
&self,
xs: It,
to_expr: &mut FnMut(&ExtCtxt, T) -> P<ast::Expr>,
) -> P<ast::Expr> {
let exprs = xs.map(|x| to_expr(self.cx, x)).collect();
self.cx.expr_vec(self.sp, exprs)
}
}
/// Looks for a single string literal and returns it.
/// Otherwise, logs an error with cx.span_err and returns None.
fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option<String> {
let mut parser = cx.new_parser_from_tts(tts);
if let Ok(expr) = parser.parse_expr() {
let entry = cx.expander().fold_expr(expr);
let regex = match entry.node {
ast::ExprKind::Lit(ref lit) => {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => {
cx.span_err(entry.span, &format!(
"expected string literal but got `{}`",
pprust::lit_to_string(&**lit)));
return None
}
}
}
_ => {
cx.span_err(entry.span, &format!(
"expected string literal but got `{}`",
pprust::expr_to_string(&*entry)));
return None
}
};
if !parser.eat(&token::Eof) {
cx.span_err(parser.span, "only one string literal allowed");
return None;
}
Some(regex)
} else {
cx.parse_sess().span_diagnostic.err("failure parsing token tree");
None
}
} | self.add(nlist, thread_caps, $nextpc, at_next);
} | random_line_split |
query19.rs | use timely::dataflow::*;
use timely::dataflow::operators::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::AsCollection;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
// use differential_dataflow::difference::DiffPair;
use ::Collections;
// use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice * (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
} | println!("TODO: query 19 could use some _u attention");
let lineitems =
collections
.lineitems()
.inner
.flat_map(|(x,t,d)|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), t, d * (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
)
.as_collection();
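// Note: the revenue term is folded into the *difference* component above
// (d * extended_price * (100 - discount) / 100 as isize), so the count_u()
// at the end of this dataflow accumulates revenue per key rather than a
// plain record count.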
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| (x.0, ()));
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| (x.0, ()));
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| (x.0, ()));
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"SM PKG"))).map(|x| x.0);
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0);
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0);
let result1 = lines1.semijoin_u(&parts1);
let result2 = lines2.semijoin_u(&parts2);
let result3 = lines3.semijoin_u(&parts3);
result1
.concat(&result2)
.concat(&result3)
.map(|(x,_)| x)
.count_u()
.probe()
} |
pub fn query<G: Scope>(collections: &mut Collections<G>) -> ProbeHandle<G::Timestamp>
where G::Timestamp: Lattice+Ord {
| random_line_split |
query19.rs | use timely::dataflow::*;
use timely::dataflow::operators::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::AsCollection;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
// use differential_dataflow::difference::DiffPair;
use ::Collections;
// use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice * (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
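// Note that starts_with(b"AIR REG", b"AIR") is also true, so the ship_mode
// test below over-approximates the SQL `l_shipmode in ('AIR', 'AIR REG')`:
// any mode beginning with "AIR" passes, and the "AIR REG" arm is redundant.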
pub fn query<G: Scope>(collections: &mut Collections<G>) -> ProbeHandle<G::Timestamp>
where G::Timestamp: Lattice+Ord {
println!("TODO: query 19 could use some _u attention");
let lineitems =
collections
.lineitems()
.inner
.flat_map(|(x,t,d)|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") |
else { None }
)
.as_collection();
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| (x.0, ()));
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| (x.0, ()));
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| (x.0, ()));
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"SM PKG"))).map(|x| x.0);
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0);
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0);
let result1 = lines1.semijoin_u(&parts1);
let result2 = lines2.semijoin_u(&parts2);
let result3 = lines3.semijoin_u(&parts3);
result1
.concat(&result2)
.concat(&result3)
.map(|(x,_)| x)
.count_u()
.probe()
} | {
Some(((x.part_key, x.quantity), t, d * (x.extended_price * (100 - x.discount) / 100) as isize))
} | conditional_block |
query19.rs | use timely::dataflow::*;
use timely::dataflow::operators::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::AsCollection;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
// use differential_dataflow::difference::DiffPair;
use ::Collections;
// use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice * (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
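// The quantity windows used below ([1,11], [10,20], [20,30]) share their
// boundary values because the SQL bounds `>= :k and <= :k + 10` are
// inclusive, with :4 = 1, :5 = 10 and :6 = 20 in this instantiation.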
fn starts_with(source: &[u8], query: &[u8]) -> bool |
pub fn query<G: Scope>(collections: &mut Collections<G>) -> ProbeHandle<G::Timestamp>
where G::Timestamp: Lattice+Ord {
println!("TODO: query 19 could use some _u attention");
let lineitems =
collections
.lineitems()
.inner
.flat_map(|(x,t,d)|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), t, d * (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
)
.as_collection();
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| (x.0, ()));
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| (x.0, ()));
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| (x.0, ()));
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"SM PKG"))).map(|x| x.0);
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0);
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0);
let result1 = lines1.semijoin_u(&parts1);
let result2 = lines2.semijoin_u(&parts2);
let result3 = lines3.semijoin_u(&parts3);
result1
.concat(&result2)
.concat(&result3)
.map(|(x,_)| x)
.count_u()
.probe()
} | {
source.len() >= query.len() && &source[..query.len()] == query
} | identifier_body |
query19.rs | use timely::dataflow::*;
use timely::dataflow::operators::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::AsCollection;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
// use differential_dataflow::difference::DiffPair;
use ::Collections;
// use ::types::create_date;
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice * (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
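// Plan sketch for the query body below: each OR-branch of the SQL above
// becomes one linesN.semijoin_u(&partsN) pairing, with the part-side
// predicates (brand/container/size) and the lineitem-side quantity window
// split across the two join inputs; the three branch outputs are then
// concatenated and aggregated.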
pub fn | <G: Scope>(collections: &mut Collections<G>) -> ProbeHandle<G::Timestamp>
where G::Timestamp: Lattice+Ord {
println!("TODO: query 19 could use some _u attention");
let lineitems =
collections
.lineitems()
.inner
.flat_map(|(x,t,d)|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), t, d * (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
)
.as_collection();
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| (x.0, ()));
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| (x.0, ()));
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| (x.0, ()));
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"SM PKG"))).map(|x| x.0);
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0);
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0);
let result1 = lines1.semijoin_u(&parts1);
let result2 = lines2.semijoin_u(&parts2);
let result3 = lines3.semijoin_u(&parts3);
result1
.concat(&result2)
.concat(&result3)
.map(|(x,_)| x)
.count_u()
.probe()
} | query | identifier_name |
annotation.py | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Event(models.Model):
|
class Annotation(models.Model):
_inherit = 'myo.annotation'
event_ids = fields.Many2many(
'myo.event',
'myo_event_annotation_rel',
'annotation_id',
'event_id',
'Events'
)
| _inherit = 'myo.event'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_event_annotation_rel',
'event_id',
'annotation_id',
'Annotations'
) | identifier_body |
annotation.py | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Event(models.Model):
_inherit = 'myo.event'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_event_annotation_rel', | 'Annotations'
)
class Annotation(models.Model):
_inherit = 'myo.annotation'
event_ids = fields.Many2many(
'myo.event',
'myo_event_annotation_rel',
'annotation_id',
'event_id',
'Events'
) | 'event_id',
'annotation_id', | random_line_split |
annotation.py | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class | (models.Model):
_inherit = 'myo.event'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_event_annotation_rel',
'event_id',
'annotation_id',
'Annotations'
)
class Annotation(models.Model):
_inherit = 'myo.annotation'
event_ids = fields.Many2many(
'myo.event',
'myo_event_annotation_rel',
'annotation_id',
'event_id',
'Events'
)
| Event | identifier_name |
kendo.culture.he.js | /*
* Kendo UI Web v2014.1.318 (http://kendoui.com)
* Copyright 2014 Telerik AD. All rights reserved.
*
* Kendo UI Web commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-web
* If you do not own a commercial license, this file shall be governed by the
* GNU General Public License (GPL) version 3.
* For GPL requirements, please review: http://www.gnu.org/copyleft/gpl.html
*/
(function(f, define){
define([], f);
})(function(){
(function( window, undefined ) {
var kendo = window.kendo || (window.kendo = { cultures: {} });
kendo.cultures["he"] = {
name: "he",
numberFormat: {
pattern: ["-n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
percent: { | decimals: 2,
",": ",",
".": ".",
groupSize: [3],
symbol: "%"
},
currency: {
pattern: ["$-n","$ n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
symbol: "₪"
}
},
calendars: {
standard: {
days: {
names: ["יום ראשון","יום שני","יום שלישי","יום רביעי","יום חמישי","יום שישי","שבת"],
namesAbbr: ["יום א","יום ב","יום ג","יום ד","יום ה","יום ו","שבת"],
namesShort: ["א","ב","ג","ד","ה","ו","ש"]
},
months: {
names: ["ינואר","פברואר","מרץ","אפריל","מאי","יוני","יולי","אוגוסט","ספטמבר","אוקטובר","נובמבר","דצמבר",""],
namesAbbr: ["ינו","פבר","מרץ","אפר","מאי","יונ","יול","אוג","ספט","אוק","נוב","דצמ",""]
},
AM: ["AM","am","AM"],
PM: ["PM","pm","PM"],
patterns: {
d: "dd/MM/yyyy",
D: "dddd dd MMMM yyyy",
F: "dddd dd MMMM yyyy HH:mm:ss",
g: "dd/MM/yyyy HH:mm",
G: "dd/MM/yyyy HH:mm:ss",
m: "dd MMMM",
M: "dd MMMM",
s: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
t: "HH:mm",
T: "HH:mm:ss",
u: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
y: "MMMM yyyy",
Y: "MMMM yyyy"
},
"/": "/",
":": ":",
firstDay: 0
}
}
}
})(this);
return window.kendo;
}, typeof define == 'function' && define.amd ? define : function(_, f){ f(); }); | pattern: ["-n%","n%"], | random_line_split |
date.ts | interface Date {
getNextWeekDay(): Date;
endOfDay(): Date;
getNextWeekDayAtMidday(): Date;
}
/**
* Returns a new Date advanced to the next weekday; the receiver is not mutated
*/
Date.prototype.getNextWeekDay = function(): Date {
var dt: Date = new Date(this);
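// Note: getDay() uses local time while the setters below use UTC; near midnight
// the computed next weekday can differ between host timezones.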
switch (this.getDay()) {
case 5:
dt.setUTCDate(this.getUTCDate() + 3);
return dt;
case 6:
dt.setUTCDate(this.getUTCDate() + 2);
return dt;
default:
dt.setUTCDate(this.getUTCDate() + 1);
return dt; |
/**
* Returns a new Date set to the next weekday with the
* time set to 12:00:00.000 UTC
*/
Date.prototype.getNextWeekDayAtMidday = function () : Date {
var dt: Date = this.getNextWeekDay();
dt.setUTCHours(12);
dt.setUTCMinutes(0);
dt.setUTCSeconds(0);
dt.setUTCMilliseconds(0);
return dt;
}
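// Example (illustrative): new Date(Date.UTC(2015, 0, 2)) is a Friday, so
// getNextWeekDayAtMidday() returns Monday 2015-01-05T12:00:00.000Z.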
/**
* Sets the time of this Date to 23:59:59.000 UTC in place and returns it
*/
Date.prototype.endOfDay = function() : Date {
var dt: Date = this;
dt.setUTCHours(23);
dt.setUTCMinutes(59);
dt.setUTCSeconds(59);
dt.setUTCMilliseconds(0);
return dt;
} | }
} | random_line_split |
ReactPropTransferer.js | /**
* Copyright 2013-2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule ReactPropTransferer
*/
"use strict";
var assign = require("./Object.assign");
var emptyFunction = require("./emptyFunction");
var invariant = require("./invariant");
var joinClasses = require("./joinClasses");
var warning = require("./warning");
var didWarn = false;
/**
* Creates a transfer strategy that will merge prop values using the supplied
* `mergeStrategy`. If a prop was previously unset, this just sets it.
*
* @param {function} mergeStrategy
* @return {function}
*/
function createTransferStrategy(mergeStrategy) {
return function(props, key, value) {
if (!props.hasOwnProperty(key)) {
props[key] = value;
} else {
props[key] = mergeStrategy(props[key], value);
}
};
}
var transferStrategyMerge = createTransferStrategy(function(a, b) {
// `merge` overrides the first object's (`props[key]` above) keys using the
// second object's (`value`) keys. An object's style's existing `propA` would
// get overridden. Flip the order here.
return assign({}, b, a);
});
/**
* Transfer strategies dictate how props are transferred by `transferPropsTo`.
* NOTE: if you add any more exceptions to this list you should be sure to
* update `cloneWithProps()` accordingly.
*/
var TransferStrategies = {
/**
* Never transfer `children`.
*/
children: emptyFunction,
/**
* Transfer the `className` prop by merging them.
*/
className: createTransferStrategy(joinClasses),
/**
* Transfer the `style` prop (which is an object) by merging them.
*/
style: transferStrategyMerge
};
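// Illustrative example: mergeProps({className: 'a', style: {color: 'red'}},
// {className: 'b', style: {color: 'blue', margin: 4}}) yields
// {className: 'a b', style: {color: 'red', margin: 4}} — class names are joined,
// existing `style` keys win, and `children` is never transferred.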
/**
* Mutates the first argument by transferring the properties from the second
* argument.
*
* @param {object} props
* @param {object} newProps
* @return {object}
*/
function transferInto(props, newProps) {
for (var thisKey in newProps) {
if (!newProps.hasOwnProperty(thisKey)) {
continue;
}
var transferStrategy = TransferStrategies[thisKey];
if (transferStrategy && TransferStrategies.hasOwnProperty(thisKey)) {
transferStrategy(props, thisKey, newProps[thisKey]);
} else if (!props.hasOwnProperty(thisKey)) {
props[thisKey] = newProps[thisKey];
}
}
return props;
}
/**
* ReactPropTransferer is capable of transferring props to another component
* using a `transferPropsTo` method.
*
* @class ReactPropTransferer
*/
var ReactPropTransferer = {
TransferStrategies: TransferStrategies,
/**
* Merge two props objects using TransferStrategies.
*
* @param {object} oldProps original props (they take precedence)
* @param {object} newProps new props to merge in
* @return {object} a new object containing both sets of props merged.
*/
mergeProps: function(oldProps, newProps) {
return transferInto(assign({}, oldProps), newProps);
},
/**
* @lends {ReactPropTransferer.prototype}
*/
Mixin: {
/**
* Transfer props from this component to a target component.
*
* Props that do not have an explicit transfer strategy will be transferred
* only if the target component does not already have the prop set.
*
* This is usually used to pass down props to a returned root component.
*
* @param {ReactElement} element Component receiving the properties.
* @return {ReactElement} The supplied `component`.
* @final
* @protected
*/
transferPropsTo: function(element) {
("production" !== process.env.NODE_ENV ? invariant(
element._owner === this,
'%s: You can\'t call transferPropsTo() on a component that you ' +
'don\'t own, %s. This usually means you are calling ' +
'transferPropsTo() on a component passed in as props or children.',
this.constructor.displayName,
typeof element.type === 'string' ?
element.type :
element.type.displayName
) : invariant(element._owner === this));
if ("production" !== process.env.NODE_ENV) |
// Because elements are immutable we have to merge into the existing
// props object rather than clone it.
transferInto(element.props, this.props);
return element;
}
}
};
module.exports = ReactPropTransferer;
| {
if (!didWarn) {
didWarn = true;
("production" !== process.env.NODE_ENV ? warning(
false,
'transferPropsTo is deprecated. ' +
'See http://fb.me/react-transferpropsto for more information.'
) : null);
}
} | conditional_block |
ReactPropTransferer.js | /**
* Copyright 2013-2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule ReactPropTransferer
*/
"use strict";
var assign = require("./Object.assign");
var emptyFunction = require("./emptyFunction");
var invariant = require("./invariant");
var joinClasses = require("./joinClasses");
var warning = require("./warning");
var didWarn = false;
/**
* Creates a transfer strategy that will merge prop values using the supplied
* `mergeStrategy`. If a prop was previously unset, this just sets it.
*
* @param {function} mergeStrategy
* @return {function}
*/
function createTransferStrategy(mergeStrategy) {
return function(props, key, value) {
if (!props.hasOwnProperty(key)) {
props[key] = value;
} else {
props[key] = mergeStrategy(props[key], value);
}
};
}
var transferStrategyMerge = createTransferStrategy(function(a, b) {
// `merge` overrides the first object's (`props[key]` above) keys using the
// second object's (`value`) keys. An object's style's existing `propA` would
// get overridden. Flip the order here.
return assign({}, b, a);
});
/**
* Transfer strategies dictate how props are transferred by `transferPropsTo`.
* NOTE: if you add any more exceptions to this list you should be sure to
* update `cloneWithProps()` accordingly.
*/
var TransferStrategies = {
/**
* Never transfer `children`.
*/
children: emptyFunction,
/**
* Transfer the `className` prop by merging them.
*/
className: createTransferStrategy(joinClasses),
/**
* Transfer the `style` prop (which is an object) by merging them.
*/
style: transferStrategyMerge
};
/**
* Mutates the first argument by transferring the properties from the second
* argument.
*
* @param {object} props
* @param {object} newProps
* @return {object}
*/
function transferInto(props, newProps) {
for (var thisKey in newProps) {
if (!newProps.hasOwnProperty(thisKey)) {
continue;
}
var transferStrategy = TransferStrategies[thisKey];
if (transferStrategy && TransferStrategies.hasOwnProperty(thisKey)) {
transferStrategy(props, thisKey, newProps[thisKey]);
} else if (!props.hasOwnProperty(thisKey)) {
props[thisKey] = newProps[thisKey];
}
}
return props;
}
/**
* ReactPropTransferer is capable of transferring props to another component
* using a `transferPropsTo` method.
*
* @class ReactPropTransferer
*/
var ReactPropTransferer = {
TransferStrategies: TransferStrategies,
/** | * @param {object} newProps new props to merge in
* @return {object} a new object containing both sets of props merged.
*/
mergeProps: function(oldProps, newProps) {
return transferInto(assign({}, oldProps), newProps);
},
/**
* @lends {ReactPropTransferer.prototype}
*/
Mixin: {
/**
* Transfer props from this component to a target component.
*
* Props that do not have an explicit transfer strategy will be transferred
* only if the target component does not already have the prop set.
*
* This is usually used to pass down props to a returned root component.
*
* @param {ReactElement} element Component receiving the properties.
* @return {ReactElement} The supplied `component`.
* @final
* @protected
*/
transferPropsTo: function(element) {
("production" !== process.env.NODE_ENV ? invariant(
element._owner === this,
'%s: You can\'t call transferPropsTo() on a component that you ' +
'don\'t own, %s. This usually means you are calling ' +
'transferPropsTo() on a component passed in as props or children.',
this.constructor.displayName,
typeof element.type === 'string' ?
element.type :
element.type.displayName
) : invariant(element._owner === this));
if ("production" !== process.env.NODE_ENV) {
if (!didWarn) {
didWarn = true;
("production" !== process.env.NODE_ENV ? warning(
false,
'transferPropsTo is deprecated. ' +
'See http://fb.me/react-transferpropsto for more information.'
) : null);
}
}
// Because elements are immutable we have to merge into the existing
// props object rather than clone it.
transferInto(element.props, this.props);
return element;
}
}
};
module.exports = ReactPropTransferer; | * Merge two props objects using TransferStrategies.
*
* @param {object} oldProps original props (they take precedence) | random_line_split |
ReactPropTransferer.js | /**
* Copyright 2013-2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule ReactPropTransferer
*/
"use strict";
var assign = require("./Object.assign");
var emptyFunction = require("./emptyFunction");
var invariant = require("./invariant");
var joinClasses = require("./joinClasses");
var warning = require("./warning");
var didWarn = false;
/**
* Creates a transfer strategy that will merge prop values using the supplied
* `mergeStrategy`. If a prop was previously unset, this just sets it.
*
* @param {function} mergeStrategy
* @return {function}
*/
function createTransferStrategy(mergeStrategy) {
return function(props, key, value) {
if (!props.hasOwnProperty(key)) {
props[key] = value;
} else {
props[key] = mergeStrategy(props[key], value);
}
};
}
var transferStrategyMerge = createTransferStrategy(function(a, b) {
// `merge` overrides the first object's (`props[key]` above) keys using the
// second object's (`value`) keys. An object's style's existing `propA` would
// get overridden. Flip the order here.
return assign({}, b, a);
});
/**
* Transfer strategies dictate how props are transferred by `transferPropsTo`.
* NOTE: if you add any more exceptions to this list you should be sure to
* update `cloneWithProps()` accordingly.
*/
var TransferStrategies = {
/**
* Never transfer `children`.
*/
children: emptyFunction,
/**
* Transfer the `className` prop by merging them.
*/
className: createTransferStrategy(joinClasses),
/**
* Transfer the `style` prop (which is an object) by merging them.
*/
style: transferStrategyMerge
};
/**
* Mutates the first argument by transferring the properties from the second
* argument.
*
* @param {object} props
* @param {object} newProps
* @return {object}
*/
function transferInto(props, newProps) |
/**
* ReactPropTransferer is capable of transferring props to another component
* using a `transferPropsTo` method.
*
* @class ReactPropTransferer
*/
var ReactPropTransferer = {
TransferStrategies: TransferStrategies,
/**
* Merge two props objects using TransferStrategies.
*
* @param {object} oldProps original props (they take precedence)
* @param {object} newProps new props to merge in
* @return {object} a new object containing both sets of props merged.
*/
mergeProps: function(oldProps, newProps) {
return transferInto(assign({}, oldProps), newProps);
},
/**
* @lends {ReactPropTransferer.prototype}
*/
Mixin: {
/**
* Transfer props from this component to a target component.
*
* Props that do not have an explicit transfer strategy will be transferred
* only if the target component does not already have the prop set.
*
* This is usually used to pass down props to a returned root component.
*
* @param {ReactElement} element Component receiving the properties.
* @return {ReactElement} The supplied `component`.
* @final
* @protected
*/
transferPropsTo: function(element) {
("production" !== process.env.NODE_ENV ? invariant(
element._owner === this,
'%s: You can\'t call transferPropsTo() on a component that you ' +
'don\'t own, %s. This usually means you are calling ' +
'transferPropsTo() on a component passed in as props or children.',
this.constructor.displayName,
typeof element.type === 'string' ?
element.type :
element.type.displayName
) : invariant(element._owner === this));
if ("production" !== process.env.NODE_ENV) {
if (!didWarn) {
didWarn = true;
("production" !== process.env.NODE_ENV ? warning(
false,
'transferPropsTo is deprecated. ' +
'See http://fb.me/react-transferpropsto for more information.'
) : null);
}
}
// Because elements are immutable we have to merge into the existing
// props object rather than clone it.
transferInto(element.props, this.props);
return element;
}
}
};
module.exports = ReactPropTransferer;
| {
for (var thisKey in newProps) {
if (!newProps.hasOwnProperty(thisKey)) {
continue;
}
var transferStrategy = TransferStrategies[thisKey];
if (transferStrategy && TransferStrategies.hasOwnProperty(thisKey)) {
transferStrategy(props, thisKey, newProps[thisKey]);
} else if (!props.hasOwnProperty(thisKey)) {
props[thisKey] = newProps[thisKey];
}
}
return props;
} | identifier_body |
ReactPropTransferer.js | /**
* Copyright 2013-2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule ReactPropTransferer
*/
"use strict";
var assign = require("./Object.assign");
var emptyFunction = require("./emptyFunction");
var invariant = require("./invariant");
var joinClasses = require("./joinClasses");
var warning = require("./warning");
var didWarn = false;
/**
* Creates a transfer strategy that will merge prop values using the supplied
* `mergeStrategy`. If a prop was previously unset, this just sets it.
*
* @param {function} mergeStrategy
* @return {function}
*/
function | (mergeStrategy) {
return function(props, key, value) {
if (!props.hasOwnProperty(key)) {
props[key] = value;
} else {
props[key] = mergeStrategy(props[key], value);
}
};
}
var transferStrategyMerge = createTransferStrategy(function(a, b) {
// `merge` overrides the first object's (`props[key]` above) keys using the
// second object's (`value`) keys. An object's style's existing `propA` would
// get overridden. Flip the order here.
return assign({}, b, a);
});
/**
* Transfer strategies dictate how props are transferred by `transferPropsTo`.
* NOTE: if you add any more exceptions to this list you should be sure to
* update `cloneWithProps()` accordingly.
*/
var TransferStrategies = {
/**
* Never transfer `children`.
*/
children: emptyFunction,
/**
* Transfer the `className` prop by merging them.
*/
className: createTransferStrategy(joinClasses),
/**
* Transfer the `style` prop (which is an object) by merging them.
*/
style: transferStrategyMerge
};
/**
* Mutates the first argument by transferring the properties from the second
* argument.
*
* @param {object} props
* @param {object} newProps
* @return {object}
*/
function transferInto(props, newProps) {
for (var thisKey in newProps) {
if (!newProps.hasOwnProperty(thisKey)) {
continue;
}
var transferStrategy = TransferStrategies[thisKey];
if (transferStrategy && TransferStrategies.hasOwnProperty(thisKey)) {
transferStrategy(props, thisKey, newProps[thisKey]);
} else if (!props.hasOwnProperty(thisKey)) {
props[thisKey] = newProps[thisKey];
}
}
return props;
}
/**
* ReactPropTransferer is capable of transferring props to another component
* using a `transferPropsTo` method.
*
* @class ReactPropTransferer
*/
var ReactPropTransferer = {
TransferStrategies: TransferStrategies,
/**
* Merge two props objects using TransferStrategies.
*
* @param {object} oldProps original props (they take precedence)
* @param {object} newProps new props to merge in
* @return {object} a new object containing both sets of props merged.
*/
mergeProps: function(oldProps, newProps) {
return transferInto(assign({}, oldProps), newProps);
},
/**
* @lends {ReactPropTransferer.prototype}
*/
Mixin: {
/**
* Transfer props from this component to a target component.
*
* Props that do not have an explicit transfer strategy will be transferred
* only if the target component does not already have the prop set.
*
* This is usually used to pass down props to a returned root component.
*
* @param {ReactElement} element Component receiving the properties.
* @return {ReactElement} The supplied `component`.
* @final
* @protected
*/
transferPropsTo: function(element) {
("production" !== process.env.NODE_ENV ? invariant(
element._owner === this,
'%s: You can\'t call transferPropsTo() on a component that you ' +
'don\'t own, %s. This usually means you are calling ' +
'transferPropsTo() on a component passed in as props or children.',
this.constructor.displayName,
typeof element.type === 'string' ?
element.type :
element.type.displayName
) : invariant(element._owner === this));
if ("production" !== process.env.NODE_ENV) {
if (!didWarn) {
didWarn = true;
("production" !== process.env.NODE_ENV ? warning(
false,
'transferPropsTo is deprecated. ' +
'See http://fb.me/react-transferpropsto for more information.'
) : null);
}
}
// Because elements are immutable we have to merge into the existing
// props object rather than clone it.
transferInto(element.props, this.props);
return element;
}
}
};
module.exports = ReactPropTransferer;
| createTransferStrategy | identifier_name |
any_unique_aliases_generated.rs | // automatically generated by the FlatBuffers compiler, do not modify
extern crate flatbuffers;
use std::mem;
use std::cmp::Ordering;
use self::flatbuffers::{EndianScalar, Follow};
use super::*;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_ANY_UNIQUE_ALIASES: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_ANY_UNIQUE_ALIASES: u8 = 3;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_ANY_UNIQUE_ALIASES: [AnyUniqueAliases; 4] = [
AnyUniqueAliases::NONE,
AnyUniqueAliases::M,
AnyUniqueAliases::TS,
AnyUniqueAliases::M2,
];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct AnyUniqueAliases(pub u8);
#[allow(non_upper_case_globals)]
impl AnyUniqueAliases {
pub const NONE: Self = Self(0);
pub const M: Self = Self(1);
pub const TS: Self = Self(2);
pub const M2: Self = Self(3);
pub const ENUM_MIN: u8 = 0;
pub const ENUM_MAX: u8 = 3;
pub const ENUM_VALUES: &'static [Self] = &[
Self::NONE,
Self::M,
Self::TS,
Self::M2,
];
/// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::NONE => Some("NONE"),
Self::M => Some("M"),
Self::TS => Some("TS"),
Self::M2 => Some("M2"),
_ => None,
}
}
}
impl std::fmt::Debug for AnyUniqueAliases {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for AnyUniqueAliases {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = unsafe {
flatbuffers::read_scalar_at::<u8>(buf, loc)
};
Self(b)
}
}
impl flatbuffers::Push for AnyUniqueAliases {
type Output = AnyUniqueAliases;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
}
}
impl flatbuffers::EndianScalar for AnyUniqueAliases {
#[inline]
fn to_little_endian(self) -> Self {
let b = u8::to_le(self.0);
Self(b)
}
#[inline]
#[allow(clippy::wrong_self_convention)]
fn from_little_endian(self) -> Self |
}
impl<'a> flatbuffers::Verifiable for AnyUniqueAliases {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for AnyUniqueAliases {}
pub struct AnyUniqueAliasesUnionTableOffset {}
#[allow(clippy::upper_case_acronyms)]
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq)]
pub enum AnyUniqueAliasesT {
NONE,
M(Box<MonsterT>),
TS(Box<TestSimpleTableWithEnumT>),
M2(Box<super::example_2::MonsterT>),
}
impl Default for AnyUniqueAliasesT {
fn default() -> Self {
Self::NONE
}
}
impl AnyUniqueAliasesT {
pub fn any_unique_aliases_type(&self) -> AnyUniqueAliases {
match self {
Self::NONE => AnyUniqueAliases::NONE,
Self::M(_) => AnyUniqueAliases::M,
Self::TS(_) => AnyUniqueAliases::TS,
Self::M2(_) => AnyUniqueAliases::M2,
}
}
pub fn pack(&self, fbb: &mut flatbuffers::FlatBufferBuilder) -> Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>> {
match self {
Self::NONE => None,
Self::M(v) => Some(v.pack(fbb).as_union_value()),
Self::TS(v) => Some(v.pack(fbb).as_union_value()),
Self::M2(v) => Some(v.pack(fbb).as_union_value()),
}
}
/// If the union variant matches, return the owned MonsterT, setting the union to NONE.
pub fn take_m(&mut self) -> Option<Box<MonsterT>> {
if let Self::M(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::M(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
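// Illustrative use of the accessors in this impl (assumes `MonsterT: Default`, which
// the flatc object API normally derives):
// let mut u = AnyUniqueAliasesT::M(Box::new(MonsterT::default()));
// assert!(u.as_m().is_some());
// let owned = u.take_m(); // `u` is AnyUniqueAliasesT::NONE afterwards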
/// If the union variant matches, return a reference to the MonsterT.
pub fn as_m(&self) -> Option<&MonsterT> {
if let Self::M(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the MonsterT.
pub fn as_m_mut(&mut self) -> Option<&mut MonsterT> {
if let Self::M(v) = self { Some(v.as_mut()) } else { None }
}
/// If the union variant matches, return the owned TestSimpleTableWithEnumT, setting the union to NONE.
pub fn take_ts(&mut self) -> Option<Box<TestSimpleTableWithEnumT>> {
if let Self::TS(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::TS(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the TestSimpleTableWithEnumT.
pub fn as_ts(&self) -> Option<&TestSimpleTableWithEnumT> {
if let Self::TS(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the TestSimpleTableWithEnumT.
pub fn as_ts_mut(&mut self) -> Option<&mut TestSimpleTableWithEnumT> {
if let Self::TS(v) = self { Some(v.as_mut()) } else { None }
}
/// If the union variant matches, return the owned super::example_2::MonsterT, setting the union to NONE.
pub fn take_m2(&mut self) -> Option<Box<super::example_2::MonsterT>> {
if let Self::M2(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::M2(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the super::example_2::MonsterT.
pub fn as_m2(&self) -> Option<&super::example_2::MonsterT> {
if let Self::M2(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the super::example_2::MonsterT.
pub fn as_m2_mut(&mut self) -> Option<&mut super::example_2::MonsterT> {
if let Self::M2(v) = self { Some(v.as_mut()) } else { None }
}
}
| {
let b = u8::from_le(self.0);
Self(b)
} | identifier_body |
any_unique_aliases_generated.rs | // automatically generated by the FlatBuffers compiler, do not modify
extern crate flatbuffers;
use std::mem;
use std::cmp::Ordering;
use self::flatbuffers::{EndianScalar, Follow};
use super::*;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_ANY_UNIQUE_ALIASES: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_ANY_UNIQUE_ALIASES: u8 = 3;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_ANY_UNIQUE_ALIASES: [AnyUniqueAliases; 4] = [
AnyUniqueAliases::NONE,
AnyUniqueAliases::M,
AnyUniqueAliases::TS,
AnyUniqueAliases::M2,
];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct AnyUniqueAliases(pub u8);
#[allow(non_upper_case_globals)]
impl AnyUniqueAliases {
pub const NONE: Self = Self(0);
pub const M: Self = Self(1);
pub const TS: Self = Self(2);
pub const M2: Self = Self(3);
pub const ENUM_MIN: u8 = 0;
pub const ENUM_MAX: u8 = 3;
pub const ENUM_VALUES: &'static [Self] = &[
Self::NONE,
Self::M,
Self::TS,
Self::M2,
];
/// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::NONE => Some("NONE"),
Self::M => Some("M"),
Self::TS => Some("TS"),
Self::M2 => Some("M2"),
_ => None,
}
}
}
impl std::fmt::Debug for AnyUniqueAliases {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for AnyUniqueAliases {
type Inner = Self;
#[inline]
fn | (buf: &'a [u8], loc: usize) -> Self::Inner {
let b = unsafe {
flatbuffers::read_scalar_at::<u8>(buf, loc)
};
Self(b)
}
}
impl flatbuffers::Push for AnyUniqueAliases {
type Output = AnyUniqueAliases;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
}
}
impl flatbuffers::EndianScalar for AnyUniqueAliases {
#[inline]
fn to_little_endian(self) -> Self {
let b = u8::to_le(self.0);
Self(b)
}
#[inline]
#[allow(clippy::wrong_self_convention)]
fn from_little_endian(self) -> Self {
let b = u8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for AnyUniqueAliases {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for AnyUniqueAliases {}
pub struct AnyUniqueAliasesUnionTableOffset {}
#[allow(clippy::upper_case_acronyms)]
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq)]
pub enum AnyUniqueAliasesT {
NONE,
M(Box<MonsterT>),
TS(Box<TestSimpleTableWithEnumT>),
M2(Box<super::example_2::MonsterT>),
}
impl Default for AnyUniqueAliasesT {
fn default() -> Self {
Self::NONE
}
}
impl AnyUniqueAliasesT {
pub fn any_unique_aliases_type(&self) -> AnyUniqueAliases {
match self {
Self::NONE => AnyUniqueAliases::NONE,
Self::M(_) => AnyUniqueAliases::M,
Self::TS(_) => AnyUniqueAliases::TS,
Self::M2(_) => AnyUniqueAliases::M2,
}
}
pub fn pack(&self, fbb: &mut flatbuffers::FlatBufferBuilder) -> Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>> {
match self {
Self::NONE => None,
Self::M(v) => Some(v.pack(fbb).as_union_value()),
Self::TS(v) => Some(v.pack(fbb).as_union_value()),
Self::M2(v) => Some(v.pack(fbb).as_union_value()),
}
}
/// If the union variant matches, return the owned MonsterT, setting the union to NONE.
pub fn take_m(&mut self) -> Option<Box<MonsterT>> {
if let Self::M(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::M(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the MonsterT.
pub fn as_m(&self) -> Option<&MonsterT> {
if let Self::M(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the MonsterT.
pub fn as_m_mut(&mut self) -> Option<&mut MonsterT> {
if let Self::M(v) = self { Some(v.as_mut()) } else { None }
}
/// If the union variant matches, return the owned TestSimpleTableWithEnumT, setting the union to NONE.
pub fn take_ts(&mut self) -> Option<Box<TestSimpleTableWithEnumT>> {
if let Self::TS(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::TS(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the TestSimpleTableWithEnumT.
pub fn as_ts(&self) -> Option<&TestSimpleTableWithEnumT> {
if let Self::TS(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the TestSimpleTableWithEnumT.
pub fn as_ts_mut(&mut self) -> Option<&mut TestSimpleTableWithEnumT> {
if let Self::TS(v) = self { Some(v.as_mut()) } else { None }
}
/// If the union variant matches, return the owned super::example_2::MonsterT, setting the union to NONE.
pub fn take_m2(&mut self) -> Option<Box<super::example_2::MonsterT>> {
if let Self::M2(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::M2(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the super::example_2::MonsterT.
pub fn as_m2(&self) -> Option<&super::example_2::MonsterT> {
if let Self::M2(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the super::example_2::MonsterT.
pub fn as_m2_mut(&mut self) -> Option<&mut super::example_2::MonsterT> {
if let Self::M2(v) = self { Some(v.as_mut()) } else { None }
}
}
| follow | identifier_name |
any_unique_aliases_generated.rs | // automatically generated by the FlatBuffers compiler, do not modify
extern crate flatbuffers;
use std::mem;
use std::cmp::Ordering;
use self::flatbuffers::{EndianScalar, Follow};
use super::*;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_ANY_UNIQUE_ALIASES: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_ANY_UNIQUE_ALIASES: u8 = 3;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_ANY_UNIQUE_ALIASES: [AnyUniqueAliases; 4] = [
AnyUniqueAliases::NONE,
AnyUniqueAliases::M,
AnyUniqueAliases::TS,
AnyUniqueAliases::M2,
];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct AnyUniqueAliases(pub u8);
#[allow(non_upper_case_globals)]
impl AnyUniqueAliases {
pub const NONE: Self = Self(0);
pub const M: Self = Self(1);
pub const TS: Self = Self(2);
pub const M2: Self = Self(3);
pub const ENUM_MIN: u8 = 0;
pub const ENUM_MAX: u8 = 3;
pub const ENUM_VALUES: &'static [Self] = &[
Self::NONE,
Self::M,
Self::TS, | ];
/// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::NONE => Some("NONE"),
Self::M => Some("M"),
Self::TS => Some("TS"),
Self::M2 => Some("M2"),
_ => None,
}
}
}
impl std::fmt::Debug for AnyUniqueAliases {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for AnyUniqueAliases {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = unsafe {
flatbuffers::read_scalar_at::<u8>(buf, loc)
};
Self(b)
}
}
impl flatbuffers::Push for AnyUniqueAliases {
type Output = AnyUniqueAliases;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
}
}
impl flatbuffers::EndianScalar for AnyUniqueAliases {
#[inline]
fn to_little_endian(self) -> Self {
let b = u8::to_le(self.0);
Self(b)
}
#[inline]
#[allow(clippy::wrong_self_convention)]
fn from_little_endian(self) -> Self {
let b = u8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for AnyUniqueAliases {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for AnyUniqueAliases {}
pub struct AnyUniqueAliasesUnionTableOffset {}
#[allow(clippy::upper_case_acronyms)]
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq)]
pub enum AnyUniqueAliasesT {
NONE,
M(Box<MonsterT>),
TS(Box<TestSimpleTableWithEnumT>),
M2(Box<super::example_2::MonsterT>),
}
impl Default for AnyUniqueAliasesT {
fn default() -> Self {
Self::NONE
}
}
impl AnyUniqueAliasesT {
pub fn any_unique_aliases_type(&self) -> AnyUniqueAliases {
match self {
Self::NONE => AnyUniqueAliases::NONE,
Self::M(_) => AnyUniqueAliases::M,
Self::TS(_) => AnyUniqueAliases::TS,
Self::M2(_) => AnyUniqueAliases::M2,
}
}
pub fn pack(&self, fbb: &mut flatbuffers::FlatBufferBuilder) -> Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>> {
match self {
Self::NONE => None,
Self::M(v) => Some(v.pack(fbb).as_union_value()),
Self::TS(v) => Some(v.pack(fbb).as_union_value()),
Self::M2(v) => Some(v.pack(fbb).as_union_value()),
}
}
/// If the union variant matches, return the owned MonsterT, setting the union to NONE.
pub fn take_m(&mut self) -> Option<Box<MonsterT>> {
if let Self::M(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::M(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the MonsterT.
pub fn as_m(&self) -> Option<&MonsterT> {
if let Self::M(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the MonsterT.
pub fn as_m_mut(&mut self) -> Option<&mut MonsterT> {
if let Self::M(v) = self { Some(v.as_mut()) } else { None }
}
/// If the union variant matches, return the owned TestSimpleTableWithEnumT, setting the union to NONE.
pub fn take_ts(&mut self) -> Option<Box<TestSimpleTableWithEnumT>> {
if let Self::TS(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::TS(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the TestSimpleTableWithEnumT.
pub fn as_ts(&self) -> Option<&TestSimpleTableWithEnumT> {
if let Self::TS(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the TestSimpleTableWithEnumT.
pub fn as_ts_mut(&mut self) -> Option<&mut TestSimpleTableWithEnumT> {
if let Self::TS(v) = self { Some(v.as_mut()) } else { None }
}
/// If the union variant matches, return the owned super::example_2::MonsterT, setting the union to NONE.
pub fn take_m2(&mut self) -> Option<Box<super::example_2::MonsterT>> {
if let Self::M2(_) = self {
let v = std::mem::replace(self, Self::NONE);
if let Self::M2(w) = v {
Some(w)
} else {
unreachable!()
}
} else {
None
}
}
/// If the union variant matches, return a reference to the super::example_2::MonsterT.
pub fn as_m2(&self) -> Option<&super::example_2::MonsterT> {
if let Self::M2(v) = self { Some(v.as_ref()) } else { None }
}
/// If the union variant matches, return a mutable reference to the super::example_2::MonsterT.
pub fn as_m2_mut(&mut self) -> Option<&mut super::example_2::MonsterT> {
if let Self::M2(v) = self { Some(v.as_mut()) } else { None }
}
} | Self::M2, | random_line_split |
client.rs | //! The modules which contains CDRS Cassandra client.
use std::net;
use std::io;
use std::collections::HashMap;
use query::{Query, QueryParams, QueryBatch};
use frame::{Frame, Opcode, Flag};
use frame::frame_response::ResponseBody;
use IntoBytes;
use frame::parser::parse_frame;
use types::*;
use frame::events::SimpleServerEvent;
use compression::Compression;
use authenticators::Authenticator;
use error;
use transport::CDRSTransport;
use events::{Listener, EventStream, new_listener};
/// CDRS driver structure that provides basic functionality to work with the DB, including
/// establishing new connections, getting supported options, preparing and executing CQL
/// queries, and using compression.
#[derive(Eq, PartialEq, Ord, PartialOrd)]
pub struct CDRS<T: Authenticator, X: CDRSTransport> {
compressor: Compression,
authenticator: T,
transport: X,
}
/// Map of options supported by Cassandra server.
pub type CassandraOptions = HashMap<String, Vec<String>>;
impl<'a, T: Authenticator + 'a, X: CDRSTransport + 'a> CDRS<T, X> {
/// The method creates a new instance of the CDRS driver. At this step the instance is
/// not yet connected to the DB server. Two parameters need to be provided: `transport`,
/// an established transport to the DB server, and `authenticator`, a selected
/// authenticator that is supported by the particular DB server. A few authenticators are
/// already provided by this crate.
pub fn new(transport: X, authenticator: T) -> CDRS<T, X> {
CDRS {
compressor: Compression::None,
authenticator: authenticator,
transport: transport,
}
}
/// The method makes an Options request to the DB server. As a response the server returns
/// a map of supported options.
pub fn get_options(&mut self) -> error::Result<CassandraOptions> {
let options_frame = Frame::new_req_options().into_cbytes();
try!(self.transport.write(options_frame.as_slice()));
parse_frame(&mut self.transport, &self.compressor)
.map(|frame| match frame.get_body() {
Ok(ResponseBody::Supported(ref supported_body)) => supported_body.data.clone(),
_ => unreachable!(),
})
}
/// The method establishes a connection to the server whose address was provided at the
/// previous step. To create the connection, a compression method from the list of
/// supported ones must be provided. In the 4th version of the Cassandra protocol, lz4
/// (`Compression::Lz4`) and snappy (`Compression::Snappy`) are supported. There is also
/// one special compression method provided by the CDRS driver, `Compression::None`, which
/// tells the driver to work without compression. If compression is provided, then
/// incoming frames will be decompressed automatically.
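///
/// A minimal sketch of the full handshake (assumes `transport` and `authenticator`
/// values are built elsewhere; any `CDRSTransport`/`Authenticator` implementations work):
///
/// ```ignore
/// let session = CDRS::new(transport, authenticator)
///     .start(Compression::None)
///     .expect("startup handshake failed");
/// ```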
pub fn start(mut self, compressor: Compression) -> error::Result<Session<T, X>> {
self.compressor = compressor;
let startup_frame = Frame::new_req_startup(compressor.as_str()).into_cbytes();
try!(self.transport.write(startup_frame.as_slice()));
let start_response = try!(parse_frame(&mut self.transport, &compressor));
if start_response.opcode == Opcode::Ready {
return Ok(Session::start(self));
}
if start_response.opcode == Opcode::Authenticate {
let body = start_response.get_body()?;
let authenticator = body.get_authenticator().expect(
"Cassandra Server did communicate that it needed password
authentication but the auth schema was missing in the body response",
);
// This creates a new scope; avoiding a clone
// and we check whether
// 1. any authenticators has been passed in by client and if not send error back
// 2. authenticator is provided by the client and `auth_scheme` presented by
// the server and client are same if not send error back
// 3. if it falls through it means the preliminary conditions are true
let auth_check = self.authenticator
.get_cassandra_name()
.ok_or(error::Error::General("No authenticator was provided".to_string()))
.map(|auth| {
if authenticator != auth {
let io_err = io::Error::new(
io::ErrorKind::NotFound,
format!(
"Unsupported type of authenticator. {:?} got,
but {} is supported.",
authenticator,
authenticator
),
);
return Err(error::Error::Io(io_err));
}
Ok(())
});
if let Err(err) = auth_check {
return Err(err);
}
let auth_token_bytes = self.authenticator.get_auth_token().into_cbytes();
try!(self.transport
.write(Frame::new_req_auth_response(auth_token_bytes)
.into_cbytes()
.as_slice()));
try!(parse_frame(&mut self.transport, &compressor));
return Ok(Session::start(self));
}
unimplemented!();
}
fn drop_connection(&mut self) -> error::Result<()> {
self.transport
.close(net::Shutdown::Both)
.map_err(|err| error::Error::Io(err))
}
}
| pub struct Session<T: Authenticator, X: CDRSTransport> {
started: bool,
cdrs: CDRS<T, X>,
compressor: Compression,
}
impl<T: Authenticator, X: CDRSTransport> Session<T, X> {
/// Creates a new session based on a CDRS instance.
pub fn start(cdrs: CDRS<T, X>) -> Session<T, X> {
let compressor = cdrs.compressor.clone();
Session {
cdrs: cdrs,
started: true,
compressor: compressor,
}
}
/// The method overrides the compression method of the current session.
pub fn compressor(&mut self, compressor: Compression) -> &mut Self {
self.compressor = compressor;
self
}
/// Manually ends the current session.
/// Apart from that, the session will be ended automatically when the instance is dropped.
pub fn end(&mut self) {
if self.started {
self.started = false;
match self.cdrs.drop_connection() {
Ok(_) => (),
Err(err) => {
println!("Error occured during dropping CDRS {:?}", err);
}
}
}
}
/// The method makes a request to the DB server to prepare the provided query.
pub fn prepare(&mut self,
query: String,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let options_frame = Frame::new_req_prepare(query, flags).into_cbytes();
try!(self.cdrs.transport.write(options_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// The method makes a request to the DB server to execute a query with the provided id
/// using the provided query parameters. `id` is the ID of a query which the server
/// returns back to the driver as a response to a `prepare` request.
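///
/// A sketch of the prepare/execute round trip (extracting the prepared-statement id
/// from the response frame is elided, since it depends on the `frame` body accessors):
///
/// ```ignore
/// let prepared = session.prepare("SELECT * FROM emp".to_string(), false, false)?;
/// // let id: CBytesShort = ...; // taken from `prepared`'s result body
/// // session.execute(&id, params, false, false)?;
/// ```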
pub fn execute(&mut self,
id: &CBytesShort,
query_parameters: QueryParams,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let options_frame = Frame::new_req_execute(id, query_parameters, flags).into_cbytes();
(self.cdrs.transport.write(options_frame.as_slice()))?;
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// The method makes a request to the DB server to execute the query provided in the `query` argument.
/// You can build the query with `QueryBuilder`:
/// ```
/// use cdrs::query::QueryBuilder;
/// use cdrs::compression::Compression;
/// use cdrs::consistency::Consistency;
///
/// let select_query = QueryBuilder::new("select * from emp").finalize();
/// ```
pub fn query(&mut self,
query: Query,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let query_frame = Frame::new_req_query(query.query,
query.consistency,
query.values,
query.with_names,
query.page_size,
query.paging_state,
query.serial_consistency,
query.timestamp,
flags)
.into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
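/// Sends several queries at once as a single BATCH request built from `batch_query`.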
pub fn batch(&mut self,
batch_query: QueryBatch,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let query_frame = Frame::new_req_batch(batch_query, flags).into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// Registers for the given server events. It consumes the CDRS session and returns an
/// event `Listener` paired with an `EventStream`.
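///
/// ```ignore
/// // sketch; assumes a `session` obtained from `CDRS::start`
/// let (listener, stream) = session.listen_for(vec![SimpleServerEvent::SchemaChange])?;
/// ```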
pub fn listen_for<'a>(mut self,
events: Vec<SimpleServerEvent>)
-> error::Result<(Listener<X>, EventStream)> {
let query_frame = Frame::new_req_register(events).into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
try!(parse_frame(&mut self.cdrs.transport, &self.compressor));
Ok(new_listener(self.cdrs.transport))
}
} | /// The object that provides functionality for communication with Cassandra server. | random_line_split |
client.rs | //! The modules which contains CDRS Cassandra client.
use std::net;
use std::io;
use std::collections::HashMap;
use query::{Query, QueryParams, QueryBatch};
use frame::{Frame, Opcode, Flag};
use frame::frame_response::ResponseBody;
use IntoBytes;
use frame::parser::parse_frame;
use types::*;
use frame::events::SimpleServerEvent;
use compression::Compression;
use authenticators::Authenticator;
use error;
use transport::CDRSTransport;
use events::{Listener, EventStream, new_listener};
/// CDRS driver structure that provides basic functionality to work with the DB, including
/// establishing new connections, getting supported options, preparing and executing CQL
/// queries, and using compression.
#[derive(Eq, PartialEq, Ord, PartialOrd)]
pub struct CDRS<T: Authenticator, X: CDRSTransport> {
compressor: Compression,
authenticator: T,
transport: X,
}
/// Map of options supported by Cassandra server.
pub type CassandraOptions = HashMap<String, Vec<String>>;
impl<'a, T: Authenticator + 'a, X: CDRSTransport + 'a> CDRS<T, X> {
/// The method creates a new instance of the CDRS driver. At this step the instance is
/// not yet connected to the DB server. Two parameters need to be provided: `transport`,
/// an established transport to the DB server, and `authenticator`, a selected
/// authenticator that is supported by the particular DB server. A few authenticators are
/// already provided by this crate.
pub fn new(transport: X, authenticator: T) -> CDRS<T, X> {
CDRS {
compressor: Compression::None,
authenticator: authenticator,
transport: transport,
}
}
/// The method makes an Options request to the DB server. As a response the server returns
/// a map of supported options.
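///
/// A sketch (assumes a `conn: CDRS<_, _>` built with `CDRS::new`); typical keys in the
/// returned map include "CQL_VERSION" and "COMPRESSION":
///
/// ```ignore
/// let opts = conn.get_options()?; // `conn` must be mutable
/// ```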
pub fn get_options(&mut self) -> error::Result<CassandraOptions> {
let options_frame = Frame::new_req_options().into_cbytes();
try!(self.transport.write(options_frame.as_slice()));
parse_frame(&mut self.transport, &self.compressor)
.map(|frame| match frame.get_body() {
Ok(ResponseBody::Supported(ref supported_body)) => supported_body.data.clone(),
_ => unreachable!(),
})
}
/// The method establishes a connection to the server whose address was provided at the
/// previous step. To create the connection, a compression method from the list of
/// supported ones must be provided. In the 4th version of the Cassandra protocol, lz4
/// (`Compression::Lz4`) and snappy (`Compression::Snappy`) are supported. There is also
/// one special compression method provided by the CDRS driver, `Compression::None`, which
/// tells the driver to work without compression. If compression is provided, then
/// incoming frames will be decompressed automatically.
pub fn start(mut self, compressor: Compression) -> error::Result<Session<T, X>> {
self.compressor = compressor;
let startup_frame = Frame::new_req_startup(compressor.as_str()).into_cbytes();
try!(self.transport.write(startup_frame.as_slice()));
let start_response = try!(parse_frame(&mut self.transport, &compressor));
if start_response.opcode == Opcode::Ready {
return Ok(Session::start(self));
}
if start_response.opcode == Opcode::Authenticate |
unimplemented!();
}
fn drop_connection(&mut self) -> error::Result<()> {
self.transport
.close(net::Shutdown::Both)
.map_err(|err| error::Error::Io(err))
}
}
/// The object that provides functionality for communication with Cassandra server.
pub struct Session<T: Authenticator, X: CDRSTransport> {
started: bool,
cdrs: CDRS<T, X>,
compressor: Compression,
}
impl<T: Authenticator, X: CDRSTransport> Session<T, X> {
/// Creates a new session based on a CDRS instance.
pub fn start(cdrs: CDRS<T, X>) -> Session<T, X> {
let compressor = cdrs.compressor.clone();
Session {
cdrs: cdrs,
started: true,
compressor: compressor,
}
}
/// The method overrides the compression method of the current session.
pub fn compressor(&mut self, compressor: Compression) -> &mut Self {
self.compressor = compressor;
self
}
/// Manually ends the current session.
/// Apart from that, the session will be ended automatically when the instance is dropped.
pub fn end(&mut self) {
if self.started {
self.started = false;
match self.cdrs.drop_connection() {
Ok(_) => (),
Err(err) => {
println!("Error occured during dropping CDRS {:?}", err);
}
}
}
}
/// The method makes a request to the DB server to prepare the provided query.
pub fn prepare(&mut self,
query: String,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let options_frame = Frame::new_req_prepare(query, flags).into_cbytes();
try!(self.cdrs.transport.write(options_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// The method makes a request to the DB server to execute a query with the provided id
/// using the provided query parameters. `id` is the ID of a query which the server
/// returns back to the driver as a response to a `prepare` request.
pub fn execute(&mut self,
id: &CBytesShort,
query_parameters: QueryParams,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let options_frame = Frame::new_req_execute(id, query_parameters, flags).into_cbytes();
(self.cdrs.transport.write(options_frame.as_slice()))?;
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// The method makes a request to the DB server to execute the query provided in the `query` argument.
/// You can build the query with `QueryBuilder`:
/// ```
/// use cdrs::query::QueryBuilder;
/// use cdrs::compression::Compression;
/// use cdrs::consistency::Consistency;
///
/// let select_query = QueryBuilder::new("select * from emp").finalize();
/// ```
pub fn query(&mut self,
query: Query,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let query_frame = Frame::new_req_query(query.query,
query.consistency,
query.values,
query.with_names,
query.page_size,
query.paging_state,
query.serial_consistency,
query.timestamp,
flags)
.into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
pub fn batch(&mut self,
batch_query: QueryBatch,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let query_frame = Frame::new_req_batch(batch_query, flags).into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// Registers for the given server events. It consumes the CDRS instance.
pub fn listen_for<'a>(mut self,
events: Vec<SimpleServerEvent>)
-> error::Result<(Listener<X>, EventStream)> {
let query_frame = Frame::new_req_register(events).into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
try!(parse_frame(&mut self.cdrs.transport, &self.compressor));
Ok(new_listener(self.cdrs.transport))
}
}
| {
let body = start_response.get_body()?;
let authenticator = body.get_authenticator().expect(
"Cassandra Server did communicate that it needed password
authentication but the auth schema was missing in the body response",
);
// This creates a new scope, avoiding a clone,
// and checks whether:
// 1. an authenticator has been passed in by the client; if not, an error is sent back
// 2. the authenticator provided by the client and the `auth_scheme` presented by
// the server are the same; if not, an error is sent back
// 3. if it falls through, the preliminary conditions are met
let auth_check = self.authenticator
.get_cassandra_name()
.ok_or(error::Error::General("No authenticator was provided".to_string()))
.map(|auth| {
if authenticator != auth {
let io_err = io::Error::new(
io::ErrorKind::NotFound,
format!(
"Unsupported type of authenticator. {:?} got,
but {} is supported.",
authenticator,
authenticator
),
);
return Err(error::Error::Io(io_err));
}
Ok(())
});
if let Err(err) = auth_check {
return Err(err);
}
let auth_token_bytes = self.authenticator.get_auth_token().into_cbytes();
try!(self.transport
.write(Frame::new_req_auth_response(auth_token_bytes)
.into_cbytes()
.as_slice()));
try!(parse_frame(&mut self.transport, &compressor));
return Ok(Session::start(self));
} | conditional_block |
client.rs | //! The module which contains the CDRS Cassandra client.
use std::net;
use std::io;
use std::collections::HashMap;
use query::{Query, QueryParams, QueryBatch};
use frame::{Frame, Opcode, Flag};
use frame::frame_response::ResponseBody;
use IntoBytes;
use frame::parser::parse_frame;
use types::*;
use frame::events::SimpleServerEvent;
use compression::Compression;
use authenticators::Authenticator;
use error;
use transport::CDRSTransport;
use events::{Listener, EventStream, new_listener};
/// CDRS driver structure that provides basic functionality to work with the DB, including
/// establishing a new connection, getting supported options, preparing and executing CQL
/// queries, using compression, and more.
#[derive(Eq, PartialEq, Ord, PartialOrd)]
pub struct CDRS<T: Authenticator, X: CDRSTransport> {
compressor: Compression,
authenticator: T,
transport: X,
}
/// Map of options supported by Cassandra server.
pub type CassandraOptions = HashMap<String, Vec<String>>;
impl<'a, T: Authenticator + 'a, X: CDRSTransport + 'a> CDRS<T, X> {
/// The method creates a new instance of the CDRS driver. At this step the instance is not
/// yet connected to the DB Server. To create a new instance, two parameters need to be
/// provided: `transport` is the transport to the DB Server, and `authenticator` is a selected
/// authenticator that is supported by the particular DB Server. A few authenticators are
/// already provided by this crate.
pub fn new(transport: X, authenticator: T) -> CDRS<T, X> {
CDRS {
compressor: Compression::None,
authenticator: authenticator,
transport: transport,
}
}
/// The method makes an Options request to the DB Server. As a response the server returns
/// a map of supported options.
pub fn get_options(&mut self) -> error::Result<CassandraOptions> {
let options_frame = Frame::new_req_options().into_cbytes();
try!(self.transport.write(options_frame.as_slice()));
parse_frame(&mut self.transport, &self.compressor)
.map(|frame| match frame.get_body() {
Ok(ResponseBody::Supported(ref supported_body)) => supported_body.data.clone(),
_ => unreachable!(),
})
}
/// The method establishes a connection to the server whose address was provided in the
/// previous step. To create a connection it's required to provide a compression method from
/// the list of supported ones. In the 4th version of the Cassandra protocol lz4 (`Compression::Lz4`)
/// and snappy (`Compression::Snappy`) are supported. There is also one special compression
/// method provided by the CDRS driver, `Compression::None`, which tells the driver that it
/// should work without compression. If compression is provided then incoming frames
/// will be decompressed automatically.
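///
/// A minimal usage sketch (the transport and authenticator types shown here are
/// assumptions about the surrounding crate, so the example is not compiled):
///
/// ```ignore
/// let transport = TransportTcp::new("127.0.0.1:9042").unwrap();
/// let client = CDRS::new(transport, NoneAuthenticator);
/// let session = client.start(Compression::None).unwrap();
/// ```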
pub fn start(mut self, compressor: Compression) -> error::Result<Session<T, X>> {
self.compressor = compressor;
let startup_frame = Frame::new_req_startup(compressor.as_str()).into_cbytes();
try!(self.transport.write(startup_frame.as_slice()));
let start_response = try!(parse_frame(&mut self.transport, &compressor));
if start_response.opcode == Opcode::Ready {
return Ok(Session::start(self));
}
if start_response.opcode == Opcode::Authenticate {
let body = start_response.get_body()?;
let authenticator = body.get_authenticator().expect(
"Cassandra Server did communicate that it needed password
authentication but the auth schema was missing in the body response",
);
// This creates a new scope, avoiding a clone,
// and checks whether:
// 1. an authenticator has been passed in by the client; if not, an error is sent back
// 2. the authenticator provided by the client and the `auth_scheme` presented by
// the server are the same; if not, an error is sent back
// 3. if it falls through, the preliminary conditions are met
let auth_check = self.authenticator
.get_cassandra_name()
.ok_or(error::Error::General("No authenticator was provided".to_string()))
.map(|auth| {
if authenticator != auth {
let io_err = io::Error::new(
io::ErrorKind::NotFound,
format!(
"Unsupported type of authenticator. {:?} got,
but {} is supported.",
authenticator,
authenticator
),
);
return Err(error::Error::Io(io_err));
}
Ok(())
});
if let Err(err) = auth_check {
return Err(err);
}
let auth_token_bytes = self.authenticator.get_auth_token().into_cbytes();
try!(self.transport
.write(Frame::new_req_auth_response(auth_token_bytes)
.into_cbytes()
.as_slice()));
try!(parse_frame(&mut self.transport, &compressor));
return Ok(Session::start(self));
}
unimplemented!();
}
fn drop_connection(&mut self) -> error::Result<()> {
self.transport
.close(net::Shutdown::Both)
.map_err(|err| error::Error::Io(err))
}
}
/// The object that provides functionality for communication with the Cassandra server.
pub struct Session<T: Authenticator, X: CDRSTransport> {
started: bool,
cdrs: CDRS<T, X>,
compressor: Compression,
}
impl<T: Authenticator, X: CDRSTransport> Session<T, X> {
/// Creates a new session based on a CDRS instance.
pub fn start(cdrs: CDRS<T, X>) -> Session<T, X> {
let compressor = cdrs.compressor.clone();
Session {
cdrs: cdrs,
started: true,
compressor: compressor,
}
}
/// Overrides the compression method of the current session.
pub fn compressor(&mut self, compressor: Compression) -> &mut Self {
self.compressor = compressor;
self
}
/// Manually ends the current session.
/// Apart from that, the session will be ended automatically when the instance is dropped.
pub fn end(&mut self) {
if self.started {
self.started = false;
match self.cdrs.drop_connection() {
Ok(_) => (),
Err(err) => {
println!("Error occured during dropping CDRS {:?}", err);
}
}
}
}
/// The method makes a request to the DB Server to prepare the provided query.
pub fn prepare(&mut self,
query: String,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let options_frame = Frame::new_req_prepare(query, flags).into_cbytes();
try!(self.cdrs.transport.write(options_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// The method makes a request to the DB Server to execute a query with the provided id
/// using the provided query parameters. `id` is the ID of a query which the Server
/// returns back to the driver as a response to a `prepare` request.
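///
/// A hypothetical prepare-then-execute sketch (the variable names and parameter
/// values are assumptions, so the example is not compiled):
///
/// ```ignore
/// let prepared = session.prepare("SELECT * FROM ks.emp".to_string(), false, false)?;
/// // extract the prepared query id from the response body, then:
/// let result = session.execute(&id, query_params, false, false)?;
/// ```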
pub fn execute(&mut self,
id: &CBytesShort,
query_parameters: QueryParams,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let options_frame = Frame::new_req_execute(id, query_parameters, flags).into_cbytes();
(self.cdrs.transport.write(options_frame.as_slice()))?;
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// The method makes a request to the DB Server to execute the query provided in the `query` argument.
/// You can build the query with `QueryBuilder`:
/// ```
/// use cdrs::query::QueryBuilder;
/// use cdrs::compression::Compression;
/// use cdrs::consistency::Consistency;
///
/// let select_query = QueryBuilder::new("select * from emp").finalize();
/// ```
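///
/// The built query can then be passed to this method, e.g.
/// `session.query(select_query, false, false)` (a sketch; error handling omitted).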
pub fn query(&mut self,
query: Query,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let query_frame = Frame::new_req_query(query.query,
query.consistency,
query.values,
query.with_names,
query.page_size,
query.paging_state,
query.serial_consistency,
query.timestamp,
flags)
.into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
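/// The method makes a request to the DB Server to execute a batch of queries.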
pub fn batch(&mut self,
batch_query: QueryBatch,
with_tracing: bool,
with_warnings: bool)
-> error::Result<Frame> {
let mut flags = vec![];
if with_tracing {
flags.push(Flag::Tracing);
}
if with_warnings {
flags.push(Flag::Warning);
}
let query_frame = Frame::new_req_batch(batch_query, flags).into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
parse_frame(&mut self.cdrs.transport, &self.compressor)
}
/// Registers for the given server events. It consumes the CDRS instance.
pub fn | <'a>(mut self,
events: Vec<SimpleServerEvent>)
-> error::Result<(Listener<X>, EventStream)> {
let query_frame = Frame::new_req_register(events).into_cbytes();
try!(self.cdrs.transport.write(query_frame.as_slice()));
try!(parse_frame(&mut self.cdrs.transport, &self.compressor));
Ok(new_listener(self.cdrs.transport))
}
}
| listen_for | identifier_name |
manual_non_exhaustive.rs | use clippy_utils::attrs::is_doc_hidden;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::source::snippet_opt;
use clippy_utils::{meets_msrv, msrvs};
use if_chain::if_chain;
use rustc_ast::ast::{FieldDef, Item, ItemKind, Variant, VariantData, VisibilityKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for manual implementations of the non-exhaustive pattern.
///
/// ### Why is this bad?
/// Using the #[non_exhaustive] attribute better expresses the intent
/// and allows possible optimizations when applied to enums.
///
/// ### Example
/// ```rust
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// _c: (),
/// }
///
/// enum E {
/// A,
/// B,
/// #[doc(hidden)]
/// _C,
/// }
///
/// struct T(pub i32, pub i32, ());
/// ```
/// Use instead:
/// ```rust
/// #[non_exhaustive]
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// }
///
/// #[non_exhaustive]
/// enum E {
/// A,
/// B,
/// }
///
/// #[non_exhaustive]
/// struct T(pub i32, pub i32);
/// ```
pub MANUAL_NON_EXHAUSTIVE,
style,
"manual implementations of the non-exhaustive pattern can be simplified using #[non_exhaustive]"
}
#[derive(Clone)]
pub struct ManualNonExhaustive {
msrv: Option<RustcVersion>,
}
impl ManualNonExhaustive {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(ManualNonExhaustive => [MANUAL_NON_EXHAUSTIVE]);
impl EarlyLintPass for ManualNonExhaustive {
fn check_item(&mut self, cx: &EarlyContext<'_>, item: &Item) {
if !meets_msrv(self.msrv.as_ref(), &msrvs::NON_EXHAUSTIVE) {
return;
}
match &item.kind {
ItemKind::Enum(def, _) => {
check_manual_non_exhaustive_enum(cx, item, &def.variants);
},
ItemKind::Struct(variant_data, _) => {
if let VariantData::Unit(..) = variant_data {
return;
}
check_manual_non_exhaustive_struct(cx, item, variant_data);
},
_ => {},
}
}
extract_msrv_attr!(EarlyContext);
}
fn check_manual_non_exhaustive_enum(cx: &EarlyContext<'_>, item: &Item, variants: &[Variant]) {
fn is_non_exhaustive_marker(variant: &Variant) -> bool {
matches!(variant.data, VariantData::Unit(_))
&& variant.ident.as_str().starts_with('_')
&& is_doc_hidden(&variant.attrs)
}
let mut markers = variants.iter().filter(|v| is_non_exhaustive_marker(v));
if_chain! {
if let Some(marker) = markers.next();
if markers.count() == 0 && variants.len() > 1;
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = cx.sess.source_map().span_until_char(item.span, '{');
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this variant");
});
}
}
}
fn check_manual_non_exhaustive_struct(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) {
fn | (field: &FieldDef) -> bool {
matches!(field.vis.kind, VisibilityKind::Inherited)
}
fn is_non_exhaustive_marker(field: &FieldDef) -> bool {
is_private(field) && field.ty.kind.is_unit() && field.ident.map_or(true, |n| n.as_str().starts_with('_'))
}
fn find_header_span(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) -> Span {
let delimiter = match data {
VariantData::Struct(..) => '{',
VariantData::Tuple(..) => '(',
VariantData::Unit(_) => unreachable!("`VariantData::Unit` is already handled above"),
};
cx.sess.source_map().span_until_char(item.span, delimiter)
}
let fields = data.fields();
let private_fields = fields.iter().filter(|f| is_private(f)).count();
let public_fields = fields.iter().filter(|f| f.vis.kind.is_pub()).count();
if_chain! {
if private_fields == 1 && public_fields >= 1 && public_fields == fields.len() - 1;
if let Some(marker) = fields.iter().find(|f| is_non_exhaustive_marker(f));
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = find_header_span(cx, item, data);
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this field");
});
}
}
}
| is_private | identifier_name |
manual_non_exhaustive.rs | use clippy_utils::attrs::is_doc_hidden;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::source::snippet_opt;
use clippy_utils::{meets_msrv, msrvs};
use if_chain::if_chain;
use rustc_ast::ast::{FieldDef, Item, ItemKind, Variant, VariantData, VisibilityKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for manual implementations of the non-exhaustive pattern.
///
/// ### Why is this bad?
/// Using the #[non_exhaustive] attribute better expresses the intent
/// and allows possible optimizations when applied to enums.
///
/// ### Example
/// ```rust
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// _c: (),
/// }
///
/// enum E {
/// A,
/// B,
/// #[doc(hidden)]
/// _C,
/// }
///
/// struct T(pub i32, pub i32, ());
/// ```
/// Use instead:
/// ```rust
/// #[non_exhaustive]
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// }
///
/// #[non_exhaustive]
/// enum E {
/// A,
/// B,
/// }
///
/// #[non_exhaustive]
/// struct T(pub i32, pub i32);
/// ```
pub MANUAL_NON_EXHAUSTIVE,
style,
"manual implementations of the non-exhaustive pattern can be simplified using #[non_exhaustive]"
}
#[derive(Clone)]
pub struct ManualNonExhaustive {
msrv: Option<RustcVersion>,
}
impl ManualNonExhaustive {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(ManualNonExhaustive => [MANUAL_NON_EXHAUSTIVE]);
impl EarlyLintPass for ManualNonExhaustive {
fn check_item(&mut self, cx: &EarlyContext<'_>, item: &Item) {
if !meets_msrv(self.msrv.as_ref(), &msrvs::NON_EXHAUSTIVE) {
return;
}
match &item.kind {
ItemKind::Enum(def, _) => {
check_manual_non_exhaustive_enum(cx, item, &def.variants);
},
ItemKind::Struct(variant_data, _) => {
if let VariantData::Unit(..) = variant_data {
return;
}
check_manual_non_exhaustive_struct(cx, item, variant_data);
},
_ => {},
}
}
extract_msrv_attr!(EarlyContext);
}
fn check_manual_non_exhaustive_enum(cx: &EarlyContext<'_>, item: &Item, variants: &[Variant]) {
fn is_non_exhaustive_marker(variant: &Variant) -> bool {
matches!(variant.data, VariantData::Unit(_))
&& variant.ident.as_str().starts_with('_')
&& is_doc_hidden(&variant.attrs)
}
let mut markers = variants.iter().filter(|v| is_non_exhaustive_marker(v));
if_chain! {
if let Some(marker) = markers.next();
if markers.count() == 0 && variants.len() > 1;
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = cx.sess.source_map().span_until_char(item.span, '{');
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this variant");
});
} | }
}
fn check_manual_non_exhaustive_struct(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) {
fn is_private(field: &FieldDef) -> bool {
matches!(field.vis.kind, VisibilityKind::Inherited)
}
fn is_non_exhaustive_marker(field: &FieldDef) -> bool {
is_private(field) && field.ty.kind.is_unit() && field.ident.map_or(true, |n| n.as_str().starts_with('_'))
}
fn find_header_span(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) -> Span {
let delimiter = match data {
VariantData::Struct(..) => '{',
VariantData::Tuple(..) => '(',
VariantData::Unit(_) => unreachable!("`VariantData::Unit` is already handled above"),
};
cx.sess.source_map().span_until_char(item.span, delimiter)
}
let fields = data.fields();
let private_fields = fields.iter().filter(|f| is_private(f)).count();
let public_fields = fields.iter().filter(|f| f.vis.kind.is_pub()).count();
if_chain! {
if private_fields == 1 && public_fields >= 1 && public_fields == fields.len() - 1;
if let Some(marker) = fields.iter().find(|f| is_non_exhaustive_marker(f));
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = find_header_span(cx, item, data);
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this field");
});
}
}
} | random_line_split |
|
manual_non_exhaustive.rs | use clippy_utils::attrs::is_doc_hidden;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::source::snippet_opt;
use clippy_utils::{meets_msrv, msrvs};
use if_chain::if_chain;
use rustc_ast::ast::{FieldDef, Item, ItemKind, Variant, VariantData, VisibilityKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for manual implementations of the non-exhaustive pattern.
///
/// ### Why is this bad?
/// Using the #[non_exhaustive] attribute better expresses the intent
/// and allows possible optimizations when applied to enums.
///
/// ### Example
/// ```rust
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// _c: (),
/// }
///
/// enum E {
/// A,
/// B,
/// #[doc(hidden)]
/// _C,
/// }
///
/// struct T(pub i32, pub i32, ());
/// ```
/// Use instead:
/// ```rust
/// #[non_exhaustive]
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// }
///
/// #[non_exhaustive]
/// enum E {
/// A,
/// B,
/// }
///
/// #[non_exhaustive]
/// struct T(pub i32, pub i32);
/// ```
pub MANUAL_NON_EXHAUSTIVE,
style,
"manual implementations of the non-exhaustive pattern can be simplified using #[non_exhaustive]"
}
#[derive(Clone)]
pub struct ManualNonExhaustive {
msrv: Option<RustcVersion>,
}
impl ManualNonExhaustive {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(ManualNonExhaustive => [MANUAL_NON_EXHAUSTIVE]);
impl EarlyLintPass for ManualNonExhaustive {
fn check_item(&mut self, cx: &EarlyContext<'_>, item: &Item) {
if !meets_msrv(self.msrv.as_ref(), &msrvs::NON_EXHAUSTIVE) {
return;
}
match &item.kind {
ItemKind::Enum(def, _) => {
check_manual_non_exhaustive_enum(cx, item, &def.variants);
},
ItemKind::Struct(variant_data, _) => | ,
_ => {},
}
}
extract_msrv_attr!(EarlyContext);
}
fn check_manual_non_exhaustive_enum(cx: &EarlyContext<'_>, item: &Item, variants: &[Variant]) {
fn is_non_exhaustive_marker(variant: &Variant) -> bool {
matches!(variant.data, VariantData::Unit(_))
&& variant.ident.as_str().starts_with('_')
&& is_doc_hidden(&variant.attrs)
}
let mut markers = variants.iter().filter(|v| is_non_exhaustive_marker(v));
if_chain! {
if let Some(marker) = markers.next();
if markers.count() == 0 && variants.len() > 1;
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = cx.sess.source_map().span_until_char(item.span, '{');
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this variant");
});
}
}
}
fn check_manual_non_exhaustive_struct(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) {
fn is_private(field: &FieldDef) -> bool {
matches!(field.vis.kind, VisibilityKind::Inherited)
}
fn is_non_exhaustive_marker(field: &FieldDef) -> bool {
is_private(field) && field.ty.kind.is_unit() && field.ident.map_or(true, |n| n.as_str().starts_with('_'))
}
fn find_header_span(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) -> Span {
let delimiter = match data {
VariantData::Struct(..) => '{',
VariantData::Tuple(..) => '(',
VariantData::Unit(_) => unreachable!("`VariantData::Unit` is already handled above"),
};
cx.sess.source_map().span_until_char(item.span, delimiter)
}
let fields = data.fields();
let private_fields = fields.iter().filter(|f| is_private(f)).count();
let public_fields = fields.iter().filter(|f| f.vis.kind.is_pub()).count();
if_chain! {
if private_fields == 1 && public_fields >= 1 && public_fields == fields.len() - 1;
if let Some(marker) = fields.iter().find(|f| is_non_exhaustive_marker(f));
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = find_header_span(cx, item, data);
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this field");
});
}
}
}
| {
if let VariantData::Unit(..) = variant_data {
return;
}
check_manual_non_exhaustive_struct(cx, item, variant_data);
} | conditional_block |
manual_non_exhaustive.rs | use clippy_utils::attrs::is_doc_hidden;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::source::snippet_opt;
use clippy_utils::{meets_msrv, msrvs};
use if_chain::if_chain;
use rustc_ast::ast::{FieldDef, Item, ItemKind, Variant, VariantData, VisibilityKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, Span};
declare_clippy_lint! {
/// ### What it does
/// Checks for manual implementations of the non-exhaustive pattern.
///
/// ### Why is this bad?
/// Using the #[non_exhaustive] attribute better expresses the intent
/// and allows possible optimizations when applied to enums.
///
/// ### Example
/// ```rust
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// _c: (),
/// }
///
/// enum E {
/// A,
/// B,
/// #[doc(hidden)]
/// _C,
/// }
///
/// struct T(pub i32, pub i32, ());
/// ```
/// Use instead:
/// ```rust
/// #[non_exhaustive]
/// struct S {
/// pub a: i32,
/// pub b: i32,
/// }
///
/// #[non_exhaustive]
/// enum E {
/// A,
/// B,
/// }
///
/// #[non_exhaustive]
/// struct T(pub i32, pub i32);
/// ```
pub MANUAL_NON_EXHAUSTIVE,
style,
"manual implementations of the non-exhaustive pattern can be simplified using #[non_exhaustive]"
}
#[derive(Clone)]
pub struct ManualNonExhaustive {
msrv: Option<RustcVersion>,
}
impl ManualNonExhaustive {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(ManualNonExhaustive => [MANUAL_NON_EXHAUSTIVE]);
impl EarlyLintPass for ManualNonExhaustive {
fn check_item(&mut self, cx: &EarlyContext<'_>, item: &Item) {
if !meets_msrv(self.msrv.as_ref(), &msrvs::NON_EXHAUSTIVE) {
return;
}
match &item.kind {
ItemKind::Enum(def, _) => {
check_manual_non_exhaustive_enum(cx, item, &def.variants);
},
ItemKind::Struct(variant_data, _) => {
if let VariantData::Unit(..) = variant_data {
return;
}
check_manual_non_exhaustive_struct(cx, item, variant_data);
},
_ => {},
}
}
extract_msrv_attr!(EarlyContext);
}
fn check_manual_non_exhaustive_enum(cx: &EarlyContext<'_>, item: &Item, variants: &[Variant]) {
fn is_non_exhaustive_marker(variant: &Variant) -> bool {
matches!(variant.data, VariantData::Unit(_))
&& variant.ident.as_str().starts_with('_')
&& is_doc_hidden(&variant.attrs)
}
let mut markers = variants.iter().filter(|v| is_non_exhaustive_marker(v));
if_chain! {
if let Some(marker) = markers.next();
if markers.count() == 0 && variants.len() > 1;
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = cx.sess.source_map().span_until_char(item.span, '{');
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this variant");
});
}
}
}
fn check_manual_non_exhaustive_struct(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) {
fn is_private(field: &FieldDef) -> bool {
matches!(field.vis.kind, VisibilityKind::Inherited)
}
fn is_non_exhaustive_marker(field: &FieldDef) -> bool |
fn find_header_span(cx: &EarlyContext<'_>, item: &Item, data: &VariantData) -> Span {
let delimiter = match data {
VariantData::Struct(..) => '{',
VariantData::Tuple(..) => '(',
VariantData::Unit(_) => unreachable!("`VariantData::Unit` is already handled above"),
};
cx.sess.source_map().span_until_char(item.span, delimiter)
}
let fields = data.fields();
let private_fields = fields.iter().filter(|f| is_private(f)).count();
let public_fields = fields.iter().filter(|f| f.vis.kind.is_pub()).count();
if_chain! {
if private_fields == 1 && public_fields >= 1 && public_fields == fields.len() - 1;
if let Some(marker) = fields.iter().find(|f| is_non_exhaustive_marker(f));
then {
span_lint_and_then(
cx,
MANUAL_NON_EXHAUSTIVE,
item.span,
"this seems like a manual implementation of the non-exhaustive pattern",
|diag| {
if_chain! {
if !item.attrs.iter().any(|attr| attr.has_name(sym::non_exhaustive));
let header_span = find_header_span(cx, item, data);
if let Some(snippet) = snippet_opt(cx, header_span);
then {
diag.span_suggestion(
header_span,
"add the attribute",
format!("#[non_exhaustive] {}", snippet),
Applicability::Unspecified,
);
}
}
diag.span_help(marker.span, "remove this field");
});
}
}
}
| {
is_private(field) && field.ty.kind.is_unit() && field.ident.map_or(true, |n| n.as_str().starts_with('_'))
} | identifier_body |
view_ref.d.ts | import * as viewModule from './view';
import { ChangeDetectorRef } from '../change_detection/change_detector_ref';
import { RenderViewRef, RenderFragmentRef } from 'angular2/src/core/render/api';
export declare function internalView(viewRef: ViewRef): viewModule.AppView;
export declare function internalProtoView(protoViewRef: ProtoViewRef): viewModule.AppProtoView;
/**
* Represents a View containing a single Element that is the Host Element of a {@link Component}
* instance.
*
* A Host View is created for every dynamically created Component that was compiled on its own (as
* opposed to as a part of another Component's Template) via {@link Compiler#compileInHost} or one
* of the higher-level APIs: {@link AppViewManager#createRootHostView},
* {@link AppViewManager#createHostViewInContainer}, {@link ViewContainerRef#createHostView}.
*/
export interface HostViewRef {
}
/**
* Represents an Angular View.
*
* <!-- TODO: move the next two paragraphs to the dev guide -->
* A View is a fundamental building block of the application UI. It is the smallest grouping of
* Elements which are created and destroyed together.
*
* Properties of elements in a View can change, but the structure (number and order) of elements in
* a View cannot. Changing the structure of Elements can only be done by inserting, moving or
* removing nested Views via a {@link ViewContainer}. Each View can contain many View Containers.
* <!-- /TODO -->
*
* ### Example
*
* Given this template...
*
* ```
* Count: {{items.length}}
* <ul>
* <li *ng-for="var item of items">{{item}}</li>
* </ul>
* ```
*
* ... we have two {@link ProtoViewRef}s:
*
* Outer {@link ProtoViewRef}:
* ```
* Count: {{items.length}}
* <ul>
* <template ng-for var-item [ng-for-of]="items"></template>
* </ul>
* ```
*
* Inner {@link ProtoViewRef}:
* ```
* <li>{{item}}</li>
* ```
*
* Notice that the original template is broken down into two separate {@link ProtoViewRef}s.
*
* The outer/inner {@link ProtoViewRef}s are then assembled into views like so:
* | * <!-- ViewRef: outer-0 -->
* Count: 2
* <ul>
* <template view-container-ref></template>
* <!-- ViewRef: inner-1 --><li>first</li><!-- /ViewRef: inner-1 -->
* <!-- ViewRef: inner-2 --><li>second</li><!-- /ViewRef: inner-2 -->
* </ul>
* <!-- /ViewRef: outer-0 -->
* ```
*/
export declare abstract class ViewRef implements HostViewRef {
/**
* Sets `value` of local variable called `variableName` in this View.
*/
abstract setLocal(variableName: string, value: any): void;
changeDetectorRef: ChangeDetectorRef;
}
export declare class ViewRef_ extends ViewRef {
private _changeDetectorRef;
constructor(_view: viewModule.AppView);
/**
* Return `RenderViewRef`
*/
render: RenderViewRef;
/**
* Return `RenderFragmentRef`
*/
renderFragment: RenderFragmentRef;
/**
* Return `ChangeDetectorRef`
*/
changeDetectorRef: ChangeDetectorRef;
setLocal(variableName: string, value: any): void;
}
/**
* Represents an Angular ProtoView.
*
* A ProtoView is a prototypical {@link ViewRef View} that is the result of Template compilation and
* is used by Angular to efficiently create an instance of this View based on the compiled Template.
*
* Most ProtoViews are created and used internally by Angular and you don't need to know about them,
* except in advanced use-cases where you compile components yourself via the low-level
* {@link Compiler#compileInHost} API.
*
*
* ### Example
*
* Given this template:
*
* ```
* Count: {{items.length}}
* <ul>
* <li *ng-for="var item of items">{{item}}</li>
* </ul>
* ```
*
* Angular desugars and compiles the template into two ProtoViews:
*
* Outer ProtoView:
* ```
* Count: {{items.length}}
* <ul>
* <template ng-for var-item [ng-for-of]="items"></template>
* </ul>
* ```
*
* Inner ProtoView:
* ```
* <li>{{item}}</li>
* ```
*
* Notice that the original template is broken down into two separate ProtoViews.
*/
export declare abstract class ProtoViewRef {
}
export declare class ProtoViewRef_ extends ProtoViewRef {
constructor(_protoView: viewModule.AppProtoView);
} | * ``` | random_line_split |
view_ref.d.ts | import * as viewModule from './view';
import { ChangeDetectorRef } from '../change_detection/change_detector_ref';
import { RenderViewRef, RenderFragmentRef } from 'angular2/src/core/render/api';
export declare function internalView(viewRef: ViewRef): viewModule.AppView;
export declare function internalProtoView(protoViewRef: ProtoViewRef): viewModule.AppProtoView;
/**
* Represents a View containing a single Element that is the Host Element of a {@link Component}
* instance.
*
* A Host View is created for every dynamically created Component that was compiled on its own (as
* opposed to as a part of another Component's Template) via {@link Compiler#compileInHost} or one
* of the higher-level APIs: {@link AppViewManager#createRootHostView},
* {@link AppViewManager#createHostViewInContainer}, {@link ViewContainerRef#createHostView}.
*/
export interface HostViewRef {
}
/**
* Represents an Angular View.
*
* <!-- TODO: move the next two paragraphs to the dev guide -->
* A View is a fundamental building block of the application UI. It is the smallest grouping of
* Elements which are created and destroyed together.
*
* Properties of elements in a View can change, but the structure (number and order) of elements in
* a View cannot. Changing the structure of Elements can only be done by inserting, moving or
* removing nested Views via a {@link ViewContainer}. Each View can contain many View Containers.
* <!-- /TODO -->
*
* ### Example
*
* Given this template...
*
* ```
* Count: {{items.length}}
* <ul>
* <li *ng-for="var item of items">{{item}}</li>
* </ul>
* ```
*
* ... we have two {@link ProtoViewRef}s:
*
* Outer {@link ProtoViewRef}:
* ```
* Count: {{items.length}}
* <ul>
* <template ng-for var-item [ng-for-of]="items"></template>
* </ul>
* ```
*
* Inner {@link ProtoViewRef}:
* ```
* <li>{{item}}</li>
* ```
*
* Notice that the original template is broken down into two separate {@link ProtoViewRef}s.
*
* The outer/inner {@link ProtoViewRef}s are then assembled into views like so:
*
* ```
* <!-- ViewRef: outer-0 -->
* Count: 2
* <ul>
* <template view-container-ref></template>
* <!-- ViewRef: inner-1 --><li>first</li><!-- /ViewRef: inner-1 -->
* <!-- ViewRef: inner-2 --><li>second</li><!-- /ViewRef: inner-2 -->
* </ul>
* <!-- /ViewRef: outer-0 -->
* ```
*/
export declare abstract class ViewRef implements HostViewRef {
/**
* Sets `value` of local variable called `variableName` in this View.
*/
abstract setLocal(variableName: string, value: any): void;
changeDetectorRef: ChangeDetectorRef;
}
export declare class ViewRef_ extends ViewRef {
private _changeDetectorRef;
constructor(_view: viewModule.AppView);
/**
* Return `RenderViewRef`
*/
render: RenderViewRef;
/**
* Return `RenderFragmentRef`
*/
renderFragment: RenderFragmentRef;
/**
* Return `ChangeDetectorRef`
*/
changeDetectorRef: ChangeDetectorRef;
setLocal(variableName: string, value: any): void;
}
/**
* Represents an Angular ProtoView.
*
* A ProtoView is a prototypical {@link ViewRef View} that is the result of Template compilation and
* is used by Angular to efficiently create an instance of this View based on the compiled Template.
*
* Most ProtoViews are created and used internally by Angular and you don't need to know about them,
* except in advanced use-cases where you compile components yourself via the low-level
* {@link Compiler#compileInHost} API.
*
*
* ### Example
*
* Given this template:
*
* ```
* Count: {{items.length}}
* <ul>
* <li *ng-for="var item of items">{{item}}</li>
* </ul>
* ```
*
* Angular desugars and compiles the template into two ProtoViews:
*
* Outer ProtoView:
* ```
* Count: {{items.length}}
* <ul>
* <template ng-for var-item [ng-for-of]="items"></template>
* </ul>
* ```
*
* Inner ProtoView:
* ```
* <li>{{item}}</li>
* ```
*
* Notice that the original template is broken down into two separate ProtoViews.
*/
export declare abstract class ProtoViewRef {
}
export declare class | extends ProtoViewRef {
constructor(_protoView: viewModule.AppProtoView);
}
| ProtoViewRef_ | identifier_name |
test_sync.rs | use std::sync::Arc;
use std::thread;
// use protobuf::CodedInputStream;
// use protobuf::Message;
use quick_protobuf::*;
use super::basic::*;
// test messages are sync
#[test]
fn test_sync() | {
let m = Arc::new({
let mut r = TestTypesSingular::default();
r.int32_field = Some(23);
r
});
let threads: Vec<_> = (0..4)
.map(|_| {
let m_copy = m.clone();
thread::spawn(move || {
let mut bytes = Vec::new();
{
let mut writer = Writer::new(&mut bytes);
m_copy.write_message(&mut writer).unwrap();
}
let mut reader = BytesReader::from_bytes(&bytes);
let read = TestTypesSingular::from_reader(&mut reader, &bytes).unwrap();
read.int32_field
})
})
.collect();
let results = threads
.into_iter()
.map(|t| t.join().unwrap())
.collect::<Vec<_>>();
assert_eq!(&[Some(23), Some(23), Some(23), Some(23)], &results[..]);
} | identifier_body |
|
test_sync.rs | use std::sync::Arc;
use std::thread;
// use protobuf::CodedInputStream;
// use protobuf::Message;
use quick_protobuf::*;
use super::basic::*;
// test messages are sync
#[test]
fn | () {
let m = Arc::new({
let mut r = TestTypesSingular::default();
r.int32_field = Some(23);
r
});
let threads: Vec<_> = (0..4)
.map(|_| {
let m_copy = m.clone();
thread::spawn(move || {
let mut bytes = Vec::new();
{
let mut writer = Writer::new(&mut bytes);
m_copy.write_message(&mut writer).unwrap();
}
let mut reader = BytesReader::from_bytes(&bytes);
let read = TestTypesSingular::from_reader(&mut reader, &bytes).unwrap();
read.int32_field
})
})
.collect();
let results = threads
.into_iter()
.map(|t| t.join().unwrap())
.collect::<Vec<_>>();
assert_eq!(&[Some(23), Some(23), Some(23), Some(23)], &results[..]);
}
| test_sync | identifier_name |
test_sync.rs | use std::sync::Arc;
use std::thread;
// use protobuf::CodedInputStream;
// use protobuf::Message;
use quick_protobuf::*;
use super::basic::*;
// test messages are sync
#[test]
fn test_sync() {
let m = Arc::new({
let mut r = TestTypesSingular::default();
r.int32_field = Some(23);
r
});
let threads: Vec<_> = (0..4)
.map(|_| {
let m_copy = m.clone();
thread::spawn(move || {
let mut bytes = Vec::new(); | let mut reader = BytesReader::from_bytes(&bytes);
let read = TestTypesSingular::from_reader(&mut reader, &bytes).unwrap();
read.int32_field
})
})
.collect();
let results = threads
.into_iter()
.map(|t| t.join().unwrap())
.collect::<Vec<_>>();
assert_eq!(&[Some(23), Some(23), Some(23), Some(23)], &results[..]);
} | {
let mut writer = Writer::new(&mut bytes);
m_copy.write_message(&mut writer).unwrap();
} | random_line_split |
test_carrot.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using carrot
"""
from nova import log as logging
from nova.rpc import impl_carrot
from nova.tests.rpc import common
LOG = logging.getLogger('nova.tests.rpc')
class RpcCarrotTestCase(common._BaseRpcTestCase):
def setUp(self):
self.rpc = impl_carrot
super(RpcCarrotTestCase, self).setUp()
def tearDown(self):
super(RpcCarrotTestCase, self).tearDown()
def | (self):
"""Test that ConnectionPool recycles a single connection."""
conn1 = self.rpc.ConnectionPool.get()
self.rpc.ConnectionPool.put(conn1)
conn2 = self.rpc.ConnectionPool.get()
self.rpc.ConnectionPool.put(conn2)
self.assertEqual(conn1, conn2)
| test_connectionpool_single | identifier_name |
test_carrot.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using carrot
"""
from nova import log as logging
from nova.rpc import impl_carrot
from nova.tests.rpc import common
LOG = logging.getLogger('nova.tests.rpc')
class RpcCarrotTestCase(common._BaseRpcTestCase):
def setUp(self):
self.rpc = impl_carrot
super(RpcCarrotTestCase, self).setUp()
def tearDown(self):
|
def test_connectionpool_single(self):
"""Test that ConnectionPool recycles a single connection."""
conn1 = self.rpc.ConnectionPool.get()
self.rpc.ConnectionPool.put(conn1)
conn2 = self.rpc.ConnectionPool.get()
self.rpc.ConnectionPool.put(conn2)
self.assertEqual(conn1, conn2)
| super(RpcCarrotTestCase, self).tearDown() | identifier_body |
test_carrot.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using carrot
"""
from nova import log as logging
from nova.rpc import impl_carrot
from nova.tests.rpc import common
LOG = logging.getLogger('nova.tests.rpc')
| def tearDown(self):
super(RpcCarrotTestCase, self).tearDown()
def test_connectionpool_single(self):
"""Test that ConnectionPool recycles a single connection."""
conn1 = self.rpc.ConnectionPool.get()
self.rpc.ConnectionPool.put(conn1)
conn2 = self.rpc.ConnectionPool.get()
self.rpc.ConnectionPool.put(conn2)
self.assertEqual(conn1, conn2) | class RpcCarrotTestCase(common._BaseRpcTestCase):
def setUp(self):
self.rpc = impl_carrot
super(RpcCarrotTestCase, self).setUp()
| random_line_split |
conf.py | # -*- coding: utf-8 -*-
#
# Phaser Editor documentation build configuration file, created by
# sphinx-quickstart on Thu May 25 08:35:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
#'rinoh.frontend.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Phaser Editor 2D'
copyright = u'2016-2020, Arian Fornaris'
author = u'Arian Fornaris'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.1.7'
# The full version, including alpha/beta/rc tags.
release = u'2.1.7'
# The language for content autogenerated by Sphinx. Refer to documentation |
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#import sphinx_rtd_theme
html_theme = "phaser-editor"
# Uncomment to generate Eclipse Offline Help
#html_theme = "eclipse-help"
html_theme_path = ["_themes"]
html_show_sourcelink = False
html_show_sphinx = False
html_favicon = "logo.png"
html_title = "Phaser Editor Help"
html_show_copyright = True
print(html_theme_path)
#html_theme = 'classic'
highlight_language = 'javascript'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhaserEditordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': '',
# Latex figure (float) alignment
#
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PhaserEditor2D.tex', u'Phaser Editor 2D Documentation',
u'Arian Fornaris', 'manual'),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PhaserEditor2D', u'Phaser Editor 2D Documentation',
author, 'Arian', 'A friendly HTML5 game IDE.',
'Miscellaneous'),
] | # for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None | random_line_split |
Graph2d.js | var Emitter = require('emitter-component');
var Hammer = require('../module/hammer');
var moment = require('../module/moment');
var util = require('../util');
var DataSet = require('../DataSet');
var DataView = require('../DataView');
var Range = require('./Range');
var Core = require('./Core');
var TimeAxis = require('./component/TimeAxis');
var CurrentTime = require('./component/CurrentTime');
var CustomTime = require('./component/CustomTime');
var LineGraph = require('./component/LineGraph');
var Configurator = require('../shared/Configurator');
var Validator = require('../shared/Validator').default;
var printStyle = require('../shared/Validator').printStyle;
var allOptions = require('./optionsGraph2d').allOptions;
var configureOptions = require('./optionsGraph2d').configureOptions;
/**
* Create a graph visualization
* @param {HTMLElement} container
* @param {vis.DataSet | Array} [items]
* @param {vis.DataSet | Array} [groups]
* @param {Object} [options] See Graph2d.setOptions for the available options.
* @constructor
* @extends Core
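*
* @example <caption>A minimal usage sketch; the element id and the data values are assumptions</caption>
* var container = document.getElementById('mygraph');
* var items = [{x: '2014-06-11', y: 10}, {x: '2014-06-12', y: 25}];
* var graph2d = new Graph2d(container, items, {start: '2014-06-10', end: '2014-06-18'});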
*/
function Graph2d (container, items, groups, options) {
// if the third argument is options, the fourth is groups (optional);
if (!(Array.isArray(groups) || groups instanceof DataSet || groups instanceof DataView) && groups instanceof Object) {
var fourthArgument = options;
options = groups;
groups = fourthArgument;
}
var me = this;
this.defaultOptions = {
start: null,
end: null,
autoResize: true,
orientation: {
axis: 'bottom', // axis orientation: 'bottom', 'top', or 'both'
item: 'bottom' // not relevant for Graph2d
},
moment: moment,
width: null,
height: null,
maxHeight: null,
minHeight: null
};
this.options = util.deepExtend({}, this.defaultOptions);
// Create the DOM, props, and emitter
this._create(container);
// all components listed here will be repainted automatically
this.components = [];
this.body = {
dom: this.dom,
domProps: this.props,
emitter: {
on: this.on.bind(this),
off: this.off.bind(this),
emit: this.emit.bind(this)
},
hiddenDates: [],
util: {
toScreen: me._toScreen.bind(me),
toGlobalScreen: me._toGlobalScreen.bind(me), // this refers to the root.width
toTime: me._toTime.bind(me),
toGlobalTime : me._toGlobalTime.bind(me)
}
};
// range
this.range = new Range(this.body);
this.components.push(this.range);
this.body.range = this.range;
// time axis
this.timeAxis = new TimeAxis(this.body);
this.components.push(this.timeAxis);
//this.body.util.snap = this.timeAxis.snap.bind(this.timeAxis);
// current time bar
this.currentTime = new CurrentTime(this.body);
this.components.push(this.currentTime);
// item set
this.linegraph = new LineGraph(this.body);
this.components.push(this.linegraph);
this.itemsData = null; // DataSet
this.groupsData = null; // DataSet
this.on('tap', function (event) {
me.emit('click', me.getEventProperties(event))
});
this.on('doubletap', function (event) {
me.emit('doubleClick', me.getEventProperties(event))
});
this.dom.root.oncontextmenu = function (event) {
me.emit('contextmenu', me.getEventProperties(event))
};
// apply options
if (options) {
this.setOptions(options);
}
// IMPORTANT: THIS HAPPENS BEFORE SET ITEMS!
if (groups) {
this.setGroups(groups);
}
// create itemset
if (items) {
this.setItems(items);
}
// draw for the first time
this._redraw();
}
// Extend the functionality from Core
Graph2d.prototype = new Core();
Graph2d.prototype.setOptions = function (options) {
// validate options
let errorFound = Validator.validate(options, allOptions);
if (errorFound === true) {
console.log('%cErrors have been found in the supplied options object.', printStyle);
}
Core.prototype.setOptions.call(this, options);
};
/**
* Set items
* @param {vis.DataSet | Array | null} items
*/
Graph2d.prototype.setItems = function(items) {
var initialLoad = (this.itemsData == null);
// convert to type DataSet when needed
var newDataSet;
if (!items) {
newDataSet = null;
}
else if (items instanceof DataSet || items instanceof DataView) {
newDataSet = items;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(items, {
type: {
start: 'Date',
end: 'Date'
}
});
}
// set items
this.itemsData = newDataSet;
this.linegraph && this.linegraph.setItems(newDataSet);
if (initialLoad) {
if (this.options.start != undefined || this.options.end != undefined) {
var start = this.options.start != undefined ? this.options.start : null;
var end = this.options.end != undefined ? this.options.end : null;
this.setWindow(start, end, {animation: false});
}
else {
this.fit({animation: false});
}
}
};
/**
* Set groups
* @param {vis.DataSet | Array} groups
*/
Graph2d.prototype.setGroups = function(groups) {
// convert to type DataSet when needed
var newDataSet;
if (!groups) {
newDataSet = null;
}
else if (groups instanceof DataSet || groups instanceof DataView) {
newDataSet = groups;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(groups);
}
this.groupsData = newDataSet;
this.linegraph.setGroups(newDataSet);
};
/**
* Returns an object containing an SVG element with the icon of the group (size determined by width and height), the label of the group (content) and the yAxisOrientation of the group (left or right).
* @param groupId
* @param width
* @param height
*/
Graph2d.prototype.getLegend = function(groupId, width, height) {
if (width === undefined) {width = 15;}
if (height === undefined) {height = 15;}
if (this.linegraph.groups[groupId] !== undefined) {
return this.linegraph.groups[groupId].getLegend(width,height);
}
else {
return "cannot find group:'" + groupId + "'";
}
};
/**
* This checks if the visible option of the supplied group (by ID) is true or false.
* @param groupId
* @returns {*}
*/
Graph2d.prototype.isGroupVisible = function(groupId) {
if (this.linegraph.groups[groupId] !== undefined) {
return (this.linegraph.groups[groupId].visible && (this.linegraph.options.groups.visibility[groupId] === undefined || this.linegraph.options.groups.visibility[groupId] == true));
}
else {
return false;
}
};
/**
* Get the data range of the item set.
* @returns {{min: Date, max: Date}} range A range with a start and end Date.
* When no minimum is found, min==null
* When no maximum is found, max==null
*/
Graph2d.prototype.getDataRange = function() {
var min = null;
var max = null;
// calculate the min and max over the x field of all visible items
for (var groupId in this.linegraph.groups) {
if (this.linegraph.groups.hasOwnProperty(groupId)) {
if (this.linegraph.groups[groupId].visible == true) {
for (var i = 0; i < this.linegraph.groups[groupId].itemsData.length; i++) {
var item = this.linegraph.groups[groupId].itemsData[i];
var value = util.convert(item.x, 'Date').valueOf();
min = min == null ? value : min > value ? value : min;
max = max == null ? value : max < value ? value : max;
}
}
}
}
return {
min: (min != null) ? new Date(min) : null,
max: (max != null) ? new Date(max) : null
};
};
/**
* Generate Graph2d related information from an event
* @param {Event} event
* @return {Object} An object with related information, like on which area
* the event happened, whether an item was clicked, etc.
*/
Graph2d.prototype.getEventProperties = function (event) {
var clientX = event.center ? event.center.x : event.clientX;
var clientY = event.center ? event.center.y : event.clientY;
var x = clientX - util.getAbsoluteLeft(this.dom.centerContainer);
var y = clientY - util.getAbsoluteTop(this.dom.centerContainer);
var time = this._toTime(x);
var customTime = CustomTime.customTimeFromTarget(event);
var element = util.getTarget(event);
var what = null;
if (util.hasParent(element, this.timeAxis.dom.foreground)) {what = 'axis';} | else if (util.hasParent(element, this.linegraph.legendLeft.dom.frame)) {what = 'legend';}
else if (util.hasParent(element, this.linegraph.legendRight.dom.frame)) {what = 'legend';}
else if (customTime != null) {what = 'custom-time';}
else if (util.hasParent(element, this.currentTime.bar)) {what = 'current-time';}
else if (util.hasParent(element, this.dom.center)) {what = 'background';}
var value = [];
var yAxisLeft = this.linegraph.yAxisLeft;
var yAxisRight = this.linegraph.yAxisRight;
if (!yAxisLeft.hidden) {
value.push(yAxisLeft.screenToValue(y));
}
if (!yAxisRight.hidden) {
value.push(yAxisRight.screenToValue(y));
}
return {
event: event,
what: what,
pageX: event.srcEvent ? event.srcEvent.pageX : event.pageX,
pageY: event.srcEvent ? event.srcEvent.pageY : event.pageY,
x: x,
y: y,
time: time,
value: value
}
};
/**
* Load a configurator
* @return {Object}
* @private
*/
Graph2d.prototype._createConfigurator = function () {
return new Configurator(this, this.dom.container, configureOptions);
};
module.exports = Graph2d; | else if (this.timeAxis2 && util.hasParent(element, this.timeAxis2.dom.foreground)) {what = 'axis';}
else if (util.hasParent(element, this.linegraph.yAxisLeft.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.yAxisRight.dom.frame)) {what = 'data-axis';} | random_line_split |
Graph2d.js | var Emitter = require('emitter-component');
var Hammer = require('../module/hammer');
var moment = require('../module/moment');
var util = require('../util');
var DataSet = require('../DataSet');
var DataView = require('../DataView');
var Range = require('./Range');
var Core = require('./Core');
var TimeAxis = require('./component/TimeAxis');
var CurrentTime = require('./component/CurrentTime');
var CustomTime = require('./component/CustomTime');
var LineGraph = require('./component/LineGraph');
var Configurator = require('../shared/Configurator');
var Validator = require('../shared/Validator').default;
var printStyle = require('../shared/Validator').printStyle;
var allOptions = require('./optionsGraph2d').allOptions;
var configureOptions = require('./optionsGraph2d').configureOptions;
/**
* Create a timeline visualization
* @param {HTMLElement} container
* @param {vis.DataSet | Array} [items]
* @param {Object} [options] See Graph2d.setOptions for the available options.
* @constructor
* @extends Core
*/
function Graph2d (container, items, groups, options) |
// Extend the functionality from Core
Graph2d.prototype = new Core();
Graph2d.prototype.setOptions = function (options) {
// validate options
let errorFound = Validator.validate(options, allOptions);
if (errorFound === true) {
console.log('%cErrors have been found in the supplied options object.', printStyle);
}
Core.prototype.setOptions.call(this, options);
};
/**
* Set items
* @param {vis.DataSet | Array | null} items
*/
Graph2d.prototype.setItems = function(items) {
var initialLoad = (this.itemsData == null);
// convert to type DataSet when needed
var newDataSet;
if (!items) {
newDataSet = null;
}
else if (items instanceof DataSet || items instanceof DataView) {
newDataSet = items;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(items, {
type: {
start: 'Date',
end: 'Date'
}
});
}
// set items
this.itemsData = newDataSet;
this.linegraph && this.linegraph.setItems(newDataSet);
if (initialLoad) {
if (this.options.start != undefined || this.options.end != undefined) {
var start = this.options.start != undefined ? this.options.start : null;
var end = this.options.end != undefined ? this.options.end : null;
this.setWindow(start, end, {animation: false});
}
else {
this.fit({animation: false});
}
}
};
/**
* Set groups
* @param {vis.DataSet | Array} groups
*/
Graph2d.prototype.setGroups = function(groups) {
// convert to type DataSet when needed
var newDataSet;
if (!groups) {
newDataSet = null;
}
else if (groups instanceof DataSet || groups instanceof DataView) {
newDataSet = groups;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(groups);
}
this.groupsData = newDataSet;
this.linegraph.setGroups(newDataSet);
};
/**
* Returns an object containing an SVG element with the icon of the group (size determined by iconWidth and iconHeight), the label of the group (content) and the yAxisOrientation of the group (left or right).
* @param groupId
* @param width
* @param height
*/
Graph2d.prototype.getLegend = function(groupId, width, height) {
if (width === undefined) {width = 15;}
if (height === undefined) {height = 15;}
if (this.linegraph.groups[groupId] !== undefined) {
return this.linegraph.groups[groupId].getLegend(width,height);
}
else {
return "cannot find group:'" + groupId + "'";
}
};
/**
* This checks if the visible option of the supplied group (by ID) is true or false.
* @param groupId
* @returns {*}
*/
Graph2d.prototype.isGroupVisible = function(groupId) {
if (this.linegraph.groups[groupId] !== undefined) {
return (this.linegraph.groups[groupId].visible && (this.linegraph.options.groups.visibility[groupId] === undefined || this.linegraph.options.groups.visibility[groupId] == true));
}
else {
return false;
}
};
/**
* Get the data range of the item set.
* @returns {{min: Date, max: Date}} range A range with a start and end Date.
* When no minimum is found, min==null
* When no maximum is found, max==null
*/
Graph2d.prototype.getDataRange = function() {
var min = null;
var max = null;
// calculate min from start filed
for (var groupId in this.linegraph.groups) {
if (this.linegraph.groups.hasOwnProperty(groupId)) {
if (this.linegraph.groups[groupId].visible == true) {
for (var i = 0; i < this.linegraph.groups[groupId].itemsData.length; i++) {
var item = this.linegraph.groups[groupId].itemsData[i];
var value = util.convert(item.x, 'Date').valueOf();
min = min == null ? value : min > value ? value : min;
max = max == null ? value : max < value ? value : max;
}
}
}
}
return {
min: (min != null) ? new Date(min) : null,
max: (max != null) ? new Date(max) : null
};
};
/**
* Generate Timeline related information from an event
* @param {Event} event
* @return {Object} An object with related information, like on which area
* The event happened, whether clicked on an item, etc.
*/
Graph2d.prototype.getEventProperties = function (event) {
var clientX = event.center ? event.center.x : event.clientX;
var clientY = event.center ? event.center.y : event.clientY;
var x = clientX - util.getAbsoluteLeft(this.dom.centerContainer);
var y = clientY - util.getAbsoluteTop(this.dom.centerContainer);
var time = this._toTime(x);
var customTime = CustomTime.customTimeFromTarget(event);
var element = util.getTarget(event);
var what = null;
if (util.hasParent(element, this.timeAxis.dom.foreground)) {what = 'axis';}
else if (this.timeAxis2 && util.hasParent(element, this.timeAxis2.dom.foreground)) {what = 'axis';}
else if (util.hasParent(element, this.linegraph.yAxisLeft.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.yAxisRight.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.legendLeft.dom.frame)) {what = 'legend';}
else if (util.hasParent(element, this.linegraph.legendRight.dom.frame)) {what = 'legend';}
else if (customTime != null) {what = 'custom-time';}
else if (util.hasParent(element, this.currentTime.bar)) {what = 'current-time';}
else if (util.hasParent(element, this.dom.center)) {what = 'background';}
var value = [];
var yAxisLeft = this.linegraph.yAxisLeft;
var yAxisRight = this.linegraph.yAxisRight;
if (!yAxisLeft.hidden) {
value.push(yAxisLeft.screenToValue(y));
}
if (!yAxisRight.hidden) {
value.push(yAxisRight.screenToValue(y));
}
return {
event: event,
what: what,
pageX: event.srcEvent ? event.srcEvent.pageX : event.pageX,
pageY: event.srcEvent ? event.srcEvent.pageY : event.pageY,
x: x,
y: y,
time: time,
value: value
}
};
/**
* Load a configurator
* @return {Object}
* @private
*/
Graph2d.prototype._createConfigurator = function () {
return new Configurator(this, this.dom.container, configureOptions);
};
module.exports = Graph2d;
| {
// if the third element is options, the forth is groups (optionally);
if (!(Array.isArray(groups) || groups instanceof DataSet || groups instanceof DataView) && groups instanceof Object) {
var forthArgument = options;
options = groups;
groups = forthArgument;
}
var me = this;
this.defaultOptions = {
start: null,
end: null,
autoResize: true,
orientation: {
axis: 'bottom', // axis orientation: 'bottom', 'top', or 'both'
item: 'bottom' // not relevant for Graph2d
},
moment: moment,
width: null,
height: null,
maxHeight: null,
minHeight: null
};
this.options = util.deepExtend({}, this.defaultOptions);
// Create the DOM, props, and emitter
this._create(container);
// all components listed here will be repainted automatically
this.components = [];
this.body = {
dom: this.dom,
domProps: this.props,
emitter: {
on: this.on.bind(this),
off: this.off.bind(this),
emit: this.emit.bind(this)
},
hiddenDates: [],
util: {
toScreen: me._toScreen.bind(me),
toGlobalScreen: me._toGlobalScreen.bind(me), // this refers to the root.width
toTime: me._toTime.bind(me),
toGlobalTime : me._toGlobalTime.bind(me)
}
};
// range
this.range = new Range(this.body);
this.components.push(this.range);
this.body.range = this.range;
// time axis
this.timeAxis = new TimeAxis(this.body);
this.components.push(this.timeAxis);
//this.body.util.snap = this.timeAxis.snap.bind(this.timeAxis);
// current time bar
this.currentTime = new CurrentTime(this.body);
this.components.push(this.currentTime);
// item set
this.linegraph = new LineGraph(this.body);
this.components.push(this.linegraph);
this.itemsData = null; // DataSet
this.groupsData = null; // DataSet
this.on('tap', function (event) {
me.emit('click', me.getEventProperties(event))
});
this.on('doubletap', function (event) {
me.emit('doubleClick', me.getEventProperties(event))
});
this.dom.root.oncontextmenu = function (event) {
me.emit('contextmenu', me.getEventProperties(event))
};
// apply options
if (options) {
this.setOptions(options);
}
// IMPORTANT: THIS HAPPENS BEFORE SET ITEMS!
if (groups) {
this.setGroups(groups);
}
// create itemset
if (items) {
this.setItems(items);
}
// draw for the first time
this._redraw();
} | identifier_body |
Graph2d.js | var Emitter = require('emitter-component');
var Hammer = require('../module/hammer');
var moment = require('../module/moment');
var util = require('../util');
var DataSet = require('../DataSet');
var DataView = require('../DataView');
var Range = require('./Range');
var Core = require('./Core');
var TimeAxis = require('./component/TimeAxis');
var CurrentTime = require('./component/CurrentTime');
var CustomTime = require('./component/CustomTime');
var LineGraph = require('./component/LineGraph');
var Configurator = require('../shared/Configurator');
var Validator = require('../shared/Validator').default;
var printStyle = require('../shared/Validator').printStyle;
var allOptions = require('./optionsGraph2d').allOptions;
var configureOptions = require('./optionsGraph2d').configureOptions;
/**
* Create a timeline visualization
* @param {HTMLElement} container
* @param {vis.DataSet | Array} [items]
* @param {Object} [options] See Graph2d.setOptions for the available options.
* @constructor
* @extends Core
*/
function | (container, items, groups, options) {
// if the third element is options, the forth is groups (optionally);
if (!(Array.isArray(groups) || groups instanceof DataSet || groups instanceof DataView) && groups instanceof Object) {
var forthArgument = options;
options = groups;
groups = forthArgument;
}
var me = this;
this.defaultOptions = {
start: null,
end: null,
autoResize: true,
orientation: {
axis: 'bottom', // axis orientation: 'bottom', 'top', or 'both'
item: 'bottom' // not relevant for Graph2d
},
moment: moment,
width: null,
height: null,
maxHeight: null,
minHeight: null
};
this.options = util.deepExtend({}, this.defaultOptions);
// Create the DOM, props, and emitter
this._create(container);
// all components listed here will be repainted automatically
this.components = [];
this.body = {
dom: this.dom,
domProps: this.props,
emitter: {
on: this.on.bind(this),
off: this.off.bind(this),
emit: this.emit.bind(this)
},
hiddenDates: [],
util: {
toScreen: me._toScreen.bind(me),
toGlobalScreen: me._toGlobalScreen.bind(me), // this refers to the root.width
toTime: me._toTime.bind(me),
toGlobalTime : me._toGlobalTime.bind(me)
}
};
// range
this.range = new Range(this.body);
this.components.push(this.range);
this.body.range = this.range;
// time axis
this.timeAxis = new TimeAxis(this.body);
this.components.push(this.timeAxis);
//this.body.util.snap = this.timeAxis.snap.bind(this.timeAxis);
// current time bar
this.currentTime = new CurrentTime(this.body);
this.components.push(this.currentTime);
// item set
this.linegraph = new LineGraph(this.body);
this.components.push(this.linegraph);
this.itemsData = null; // DataSet
this.groupsData = null; // DataSet
this.on('tap', function (event) {
me.emit('click', me.getEventProperties(event))
});
this.on('doubletap', function (event) {
me.emit('doubleClick', me.getEventProperties(event))
});
this.dom.root.oncontextmenu = function (event) {
me.emit('contextmenu', me.getEventProperties(event))
};
// apply options
if (options) {
this.setOptions(options);
}
// IMPORTANT: THIS HAPPENS BEFORE SET ITEMS!
if (groups) {
this.setGroups(groups);
}
// create itemset
if (items) {
this.setItems(items);
}
// draw for the first time
this._redraw();
}
// Extend the functionality from Core
Graph2d.prototype = new Core();
Graph2d.prototype.setOptions = function (options) {
// validate options
let errorFound = Validator.validate(options, allOptions);
if (errorFound === true) {
console.log('%cErrors have been found in the supplied options object.', printStyle);
}
Core.prototype.setOptions.call(this, options);
};
/**
* Set items
* @param {vis.DataSet | Array | null} items
*/
Graph2d.prototype.setItems = function(items) {
var initialLoad = (this.itemsData == null);
// convert to type DataSet when needed
var newDataSet;
if (!items) {
newDataSet = null;
}
else if (items instanceof DataSet || items instanceof DataView) {
newDataSet = items;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(items, {
type: {
start: 'Date',
end: 'Date'
}
});
}
// set items
this.itemsData = newDataSet;
this.linegraph && this.linegraph.setItems(newDataSet);
if (initialLoad) {
if (this.options.start != undefined || this.options.end != undefined) {
var start = this.options.start != undefined ? this.options.start : null;
var end = this.options.end != undefined ? this.options.end : null;
this.setWindow(start, end, {animation: false});
}
else {
this.fit({animation: false});
}
}
};
/**
* Set groups
* @param {vis.DataSet | Array} groups
*/
Graph2d.prototype.setGroups = function(groups) {
// convert to type DataSet when needed
var newDataSet;
if (!groups) {
newDataSet = null;
}
else if (groups instanceof DataSet || groups instanceof DataView) {
newDataSet = groups;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(groups);
}
this.groupsData = newDataSet;
this.linegraph.setGroups(newDataSet);
};
/**
* Returns an object containing an SVG element with the icon of the group (size determined by iconWidth and iconHeight), the label of the group (content) and the yAxisOrientation of the group (left or right).
* @param groupId
* @param width
* @param height
*/
Graph2d.prototype.getLegend = function(groupId, width, height) {
if (width === undefined) {width = 15;}
if (height === undefined) {height = 15;}
if (this.linegraph.groups[groupId] !== undefined) {
return this.linegraph.groups[groupId].getLegend(width,height);
}
else {
return "cannot find group:'" + groupId + "'";
}
};
/**
* This checks if the visible option of the supplied group (by ID) is true or false.
* @param groupId
* @returns {*}
*/
Graph2d.prototype.isGroupVisible = function(groupId) {
if (this.linegraph.groups[groupId] !== undefined) {
return (this.linegraph.groups[groupId].visible && (this.linegraph.options.groups.visibility[groupId] === undefined || this.linegraph.options.groups.visibility[groupId] == true));
}
else {
return false;
}
};
/**
* Get the data range of the item set.
* @returns {{min: Date, max: Date}} range A range with a start and end Date.
* When no minimum is found, min==null
* When no maximum is found, max==null
*/
Graph2d.prototype.getDataRange = function() {
var min = null;
var max = null;
// calculate min from start filed
for (var groupId in this.linegraph.groups) {
if (this.linegraph.groups.hasOwnProperty(groupId)) {
if (this.linegraph.groups[groupId].visible == true) {
for (var i = 0; i < this.linegraph.groups[groupId].itemsData.length; i++) {
var item = this.linegraph.groups[groupId].itemsData[i];
var value = util.convert(item.x, 'Date').valueOf();
min = min == null ? value : min > value ? value : min;
max = max == null ? value : max < value ? value : max;
}
}
}
}
return {
min: (min != null) ? new Date(min) : null,
max: (max != null) ? new Date(max) : null
};
};
/**
* Generate Timeline related information from an event
* @param {Event} event
* @return {Object} An object with related information, like on which area
* The event happened, whether clicked on an item, etc.
*/
Graph2d.prototype.getEventProperties = function (event) {
var clientX = event.center ? event.center.x : event.clientX;
var clientY = event.center ? event.center.y : event.clientY;
var x = clientX - util.getAbsoluteLeft(this.dom.centerContainer);
var y = clientY - util.getAbsoluteTop(this.dom.centerContainer);
var time = this._toTime(x);
var customTime = CustomTime.customTimeFromTarget(event);
var element = util.getTarget(event);
var what = null;
if (util.hasParent(element, this.timeAxis.dom.foreground)) {what = 'axis';}
else if (this.timeAxis2 && util.hasParent(element, this.timeAxis2.dom.foreground)) {what = 'axis';}
else if (util.hasParent(element, this.linegraph.yAxisLeft.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.yAxisRight.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.legendLeft.dom.frame)) {what = 'legend';}
else if (util.hasParent(element, this.linegraph.legendRight.dom.frame)) {what = 'legend';}
else if (customTime != null) {what = 'custom-time';}
else if (util.hasParent(element, this.currentTime.bar)) {what = 'current-time';}
else if (util.hasParent(element, this.dom.center)) {what = 'background';}
var value = [];
var yAxisLeft = this.linegraph.yAxisLeft;
var yAxisRight = this.linegraph.yAxisRight;
if (!yAxisLeft.hidden) {
value.push(yAxisLeft.screenToValue(y));
}
if (!yAxisRight.hidden) {
value.push(yAxisRight.screenToValue(y));
}
return {
event: event,
what: what,
pageX: event.srcEvent ? event.srcEvent.pageX : event.pageX,
pageY: event.srcEvent ? event.srcEvent.pageY : event.pageY,
x: x,
y: y,
time: time,
value: value
}
};
/**
* Load a configurator
* @return {Object}
* @private
*/
Graph2d.prototype._createConfigurator = function () {
return new Configurator(this, this.dom.container, configureOptions);
};
module.exports = Graph2d;
| Graph2d | identifier_name |
Graph2d.js | var Emitter = require('emitter-component');
var Hammer = require('../module/hammer');
var moment = require('../module/moment');
var util = require('../util');
var DataSet = require('../DataSet');
var DataView = require('../DataView');
var Range = require('./Range');
var Core = require('./Core');
var TimeAxis = require('./component/TimeAxis');
var CurrentTime = require('./component/CurrentTime');
var CustomTime = require('./component/CustomTime');
var LineGraph = require('./component/LineGraph');
var Configurator = require('../shared/Configurator');
var Validator = require('../shared/Validator').default;
var printStyle = require('../shared/Validator').printStyle;
var allOptions = require('./optionsGraph2d').allOptions;
var configureOptions = require('./optionsGraph2d').configureOptions;
/**
* Create a timeline visualization
* @param {HTMLElement} container
* @param {vis.DataSet | Array} [items]
* @param {Object} [options] See Graph2d.setOptions for the available options.
* @constructor
* @extends Core
*/
function Graph2d (container, items, groups, options) {
// if the third element is options, the forth is groups (optionally);
if (!(Array.isArray(groups) || groups instanceof DataSet || groups instanceof DataView) && groups instanceof Object) {
var forthArgument = options;
options = groups;
groups = forthArgument;
}
var me = this;
this.defaultOptions = {
start: null,
end: null,
autoResize: true,
orientation: {
axis: 'bottom', // axis orientation: 'bottom', 'top', or 'both'
item: 'bottom' // not relevant for Graph2d
},
moment: moment,
width: null,
height: null,
maxHeight: null,
minHeight: null
};
this.options = util.deepExtend({}, this.defaultOptions);
// Create the DOM, props, and emitter
this._create(container);
// all components listed here will be repainted automatically
this.components = [];
this.body = {
dom: this.dom,
domProps: this.props,
emitter: {
on: this.on.bind(this),
off: this.off.bind(this),
emit: this.emit.bind(this)
},
hiddenDates: [],
util: {
toScreen: me._toScreen.bind(me),
toGlobalScreen: me._toGlobalScreen.bind(me), // this refers to the root.width
toTime: me._toTime.bind(me),
toGlobalTime : me._toGlobalTime.bind(me)
}
};
// range
this.range = new Range(this.body);
this.components.push(this.range);
this.body.range = this.range;
// time axis
this.timeAxis = new TimeAxis(this.body);
this.components.push(this.timeAxis);
//this.body.util.snap = this.timeAxis.snap.bind(this.timeAxis);
// current time bar
this.currentTime = new CurrentTime(this.body);
this.components.push(this.currentTime);
// item set
this.linegraph = new LineGraph(this.body);
this.components.push(this.linegraph);
this.itemsData = null; // DataSet
this.groupsData = null; // DataSet
this.on('tap', function (event) {
me.emit('click', me.getEventProperties(event))
});
this.on('doubletap', function (event) {
me.emit('doubleClick', me.getEventProperties(event))
});
this.dom.root.oncontextmenu = function (event) {
me.emit('contextmenu', me.getEventProperties(event))
};
// apply options
if (options) {
this.setOptions(options);
}
// IMPORTANT: THIS HAPPENS BEFORE SET ITEMS!
if (groups) {
this.setGroups(groups);
}
// create itemset
if (items) |
// draw for the first time
this._redraw();
}
// Extend the functionality from Core
Graph2d.prototype = new Core();
Graph2d.prototype.setOptions = function (options) {
// validate options
let errorFound = Validator.validate(options, allOptions);
if (errorFound === true) {
console.log('%cErrors have been found in the supplied options object.', printStyle);
}
Core.prototype.setOptions.call(this, options);
};
/**
* Set items
* @param {vis.DataSet | Array | null} items
*/
Graph2d.prototype.setItems = function(items) {
var initialLoad = (this.itemsData == null);
// convert to type DataSet when needed
var newDataSet;
if (!items) {
newDataSet = null;
}
else if (items instanceof DataSet || items instanceof DataView) {
newDataSet = items;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(items, {
type: {
start: 'Date',
end: 'Date'
}
});
}
// set items
this.itemsData = newDataSet;
this.linegraph && this.linegraph.setItems(newDataSet);
if (initialLoad) {
if (this.options.start != undefined || this.options.end != undefined) {
var start = this.options.start != undefined ? this.options.start : null;
var end = this.options.end != undefined ? this.options.end : null;
this.setWindow(start, end, {animation: false});
}
else {
this.fit({animation: false});
}
}
};
/**
* Set groups
* @param {vis.DataSet | Array} groups
*/
Graph2d.prototype.setGroups = function(groups) {
// convert to type DataSet when needed
var newDataSet;
if (!groups) {
newDataSet = null;
}
else if (groups instanceof DataSet || groups instanceof DataView) {
newDataSet = groups;
}
else {
// turn an array into a dataset
newDataSet = new DataSet(groups);
}
this.groupsData = newDataSet;
this.linegraph.setGroups(newDataSet);
};
/**
* Returns an object containing an SVG element with the icon of the group (size determined by iconWidth and iconHeight), the label of the group (content) and the yAxisOrientation of the group (left or right).
* @param groupId
* @param width
* @param height
*/
Graph2d.prototype.getLegend = function(groupId, width, height) {
if (width === undefined) {width = 15;}
if (height === undefined) {height = 15;}
if (this.linegraph.groups[groupId] !== undefined) {
return this.linegraph.groups[groupId].getLegend(width,height);
}
else {
return "cannot find group:'" + groupId + "'";
}
};
/**
* This checks if the visible option of the supplied group (by ID) is true or false.
* @param groupId
* @returns {*}
*/
Graph2d.prototype.isGroupVisible = function(groupId) {
if (this.linegraph.groups[groupId] !== undefined) {
return (this.linegraph.groups[groupId].visible && (this.linegraph.options.groups.visibility[groupId] === undefined || this.linegraph.options.groups.visibility[groupId] == true));
}
else {
return false;
}
};
/**
* Get the data range of the item set.
* @returns {{min: Date, max: Date}} range A range with a start and end Date.
* When no minimum is found, min==null
* When no maximum is found, max==null
*/
Graph2d.prototype.getDataRange = function() {
var min = null;
var max = null;
// calculate min from start filed
for (var groupId in this.linegraph.groups) {
if (this.linegraph.groups.hasOwnProperty(groupId)) {
if (this.linegraph.groups[groupId].visible == true) {
for (var i = 0; i < this.linegraph.groups[groupId].itemsData.length; i++) {
var item = this.linegraph.groups[groupId].itemsData[i];
var value = util.convert(item.x, 'Date').valueOf();
min = min == null ? value : min > value ? value : min;
max = max == null ? value : max < value ? value : max;
}
}
}
}
return {
min: (min != null) ? new Date(min) : null,
max: (max != null) ? new Date(max) : null
};
};
/**
* Generate Timeline related information from an event
* @param {Event} event
* @return {Object} An object with related information, like on which area
* The event happened, whether clicked on an item, etc.
*/
Graph2d.prototype.getEventProperties = function (event) {
var clientX = event.center ? event.center.x : event.clientX;
var clientY = event.center ? event.center.y : event.clientY;
var x = clientX - util.getAbsoluteLeft(this.dom.centerContainer);
var y = clientY - util.getAbsoluteTop(this.dom.centerContainer);
var time = this._toTime(x);
var customTime = CustomTime.customTimeFromTarget(event);
var element = util.getTarget(event);
var what = null;
if (util.hasParent(element, this.timeAxis.dom.foreground)) {what = 'axis';}
else if (this.timeAxis2 && util.hasParent(element, this.timeAxis2.dom.foreground)) {what = 'axis';}
else if (util.hasParent(element, this.linegraph.yAxisLeft.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.yAxisRight.dom.frame)) {what = 'data-axis';}
else if (util.hasParent(element, this.linegraph.legendLeft.dom.frame)) {what = 'legend';}
else if (util.hasParent(element, this.linegraph.legendRight.dom.frame)) {what = 'legend';}
else if (customTime != null) {what = 'custom-time';}
else if (util.hasParent(element, this.currentTime.bar)) {what = 'current-time';}
else if (util.hasParent(element, this.dom.center)) {what = 'background';}
var value = [];
var yAxisLeft = this.linegraph.yAxisLeft;
var yAxisRight = this.linegraph.yAxisRight;
if (!yAxisLeft.hidden) {
value.push(yAxisLeft.screenToValue(y));
}
if (!yAxisRight.hidden) {
value.push(yAxisRight.screenToValue(y));
}
return {
event: event,
what: what,
pageX: event.srcEvent ? event.srcEvent.pageX : event.pageX,
pageY: event.srcEvent ? event.srcEvent.pageY : event.pageY,
x: x,
y: y,
time: time,
value: value
}
};
/**
* Load a configurator
* @return {Object}
* @private
*/
Graph2d.prototype._createConfigurator = function () {
return new Configurator(this, this.dom.container, configureOptions);
};
module.exports = Graph2d;
| {
this.setItems(items);
} | conditional_block |
boost_query.rs | use crate::fastfield::AliveBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
use std::collections::BTreeMap;
use std::fmt;
/// `BoostQuery` is a wrapper over a query used to boost its score.
///
/// The document set matched by the `BoostQuery` is strictly the same as the underlying query.
/// The score of each document, is the score of the underlying query multiplied by the `boost`
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: Score,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
BoostQuery { query, boost }
}
}
impl Clone for BoostQuery {
fn clone(&self) -> Self {
BoostQuery {
query: self.query.box_clone(),
boost: self.boost,
}
}
}
impl fmt::Debug for BoostQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Boost(query={:?}, boost={})", self.query, self.boost)
}
}
impl Query for BoostQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let weight_without_boost = self.query.weight(searcher, scoring_enabled)?;
let boosted_weight = if scoring_enabled {
Box::new(BoostWeight::new(weight_without_boost, self.boost)) | }
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
self.query.query_terms(terms)
}
}
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: Score,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let mut explanation =
Explanation::new(format!("Boost x{} of ...", self.boost), scorer.score());
let underlying_explanation = self.weight.explain(reader, doc)?;
explanation.add_detail(underlying_explanation);
Ok(explanation)
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: Score,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
impl<S: Scorer> DocSet for BoostScorer<S> {
fn advance(&mut self) -> DocId {
self.underlying.advance()
}
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.underlying.fill_buffer(buffer)
}
fn doc(&self) -> u32 {
self.underlying.doc()
}
fn size_hint(&self) -> u32 {
self.underlying.size_hint()
}
fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
self.underlying.count(alive_bitset)
}
fn count_including_deleted(&mut self) -> u32 {
self.underlying.count_including_deleted()
}
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> Score {
self.underlying.score() * self.boost
}
}
#[cfg(test)]
mod tests {
use super::BoostQuery;
use crate::query::{AllQuery, Query};
use crate::schema::Schema;
use crate::{DocAddress, Document, Index};
#[test]
fn test_boost_query_explain() -> crate::Result<()> {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::new())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query = BoostQuery::new(Box::new(AllQuery), 0.2);
let explanation = query.explain(&searcher, DocAddress::new(0, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}"
);
Ok(())
}
} | } else {
weight_without_boost
};
Ok(boosted_weight) | random_line_split |
boost_query.rs | use crate::fastfield::AliveBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
use std::collections::BTreeMap;
use std::fmt;
/// `BoostQuery` is a wrapper over a query used to boost its score.
///
/// The document set matched by the `BoostQuery` is strictly the same as the underlying query.
/// The score of each document, is the score of the underlying query multiplied by the `boost`
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: Score,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
BoostQuery { query, boost }
}
}
impl Clone for BoostQuery {
fn clone(&self) -> Self {
BoostQuery {
query: self.query.box_clone(),
boost: self.boost,
}
}
}
impl fmt::Debug for BoostQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Boost(query={:?}, boost={})", self.query, self.boost)
}
}
impl Query for BoostQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let weight_without_boost = self.query.weight(searcher, scoring_enabled)?;
let boosted_weight = if scoring_enabled {
Box::new(BoostWeight::new(weight_without_boost, self.boost))
} else {
weight_without_boost
};
Ok(boosted_weight)
}
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
self.query.query_terms(terms)
}
}
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: Score,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc |
let mut explanation =
Explanation::new(format!("Boost x{} of ...", self.boost), scorer.score());
let underlying_explanation = self.weight.explain(reader, doc)?;
explanation.add_detail(underlying_explanation);
Ok(explanation)
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: Score,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
impl<S: Scorer> DocSet for BoostScorer<S> {
fn advance(&mut self) -> DocId {
self.underlying.advance()
}
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.underlying.fill_buffer(buffer)
}
fn doc(&self) -> u32 {
self.underlying.doc()
}
fn size_hint(&self) -> u32 {
self.underlying.size_hint()
}
fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
self.underlying.count(alive_bitset)
}
fn count_including_deleted(&mut self) -> u32 {
self.underlying.count_including_deleted()
}
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> Score {
self.underlying.score() * self.boost
}
}
#[cfg(test)]
mod tests {
use super::BoostQuery;
use crate::query::{AllQuery, Query};
use crate::schema::Schema;
use crate::{DocAddress, Document, Index};
#[test]
fn test_boost_query_explain() -> crate::Result<()> {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::new())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query = BoostQuery::new(Box::new(AllQuery), 0.2);
let explanation = query.explain(&searcher, DocAddress::new(0, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}"
);
Ok(())
}
}
| {
return Err(does_not_match(doc));
} | conditional_block |
boost_query.rs | use crate::fastfield::AliveBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
use std::collections::BTreeMap;
use std::fmt;
/// `BoostQuery` is a wrapper over a query used to boost its score.
///
/// The document set matched by the `BoostQuery` is strictly the same as the underlying query.
/// The score of each document, is the score of the underlying query multiplied by the `boost`
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: Score,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
BoostQuery { query, boost }
}
}
impl Clone for BoostQuery {
fn clone(&self) -> Self {
BoostQuery {
query: self.query.box_clone(),
boost: self.boost,
}
}
}
impl fmt::Debug for BoostQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Boost(query={:?}, boost={})", self.query, self.boost)
}
}
impl Query for BoostQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let weight_without_boost = self.query.weight(searcher, scoring_enabled)?;
let boosted_weight = if scoring_enabled {
Box::new(BoostWeight::new(weight_without_boost, self.boost))
} else {
weight_without_boost
};
Ok(boosted_weight)
}
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
self.query.query_terms(terms)
}
}
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: Score,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let mut explanation =
Explanation::new(format!("Boost x{} of ...", self.boost), scorer.score());
let underlying_explanation = self.weight.explain(reader, doc)?;
explanation.add_detail(underlying_explanation);
Ok(explanation)
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: Score,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
impl<S: Scorer> DocSet for BoostScorer<S> {
fn advance(&mut self) -> DocId {
self.underlying.advance()
}
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.underlying.fill_buffer(buffer)
}
fn doc(&self) -> u32 |
fn size_hint(&self) -> u32 {
self.underlying.size_hint()
}
fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
self.underlying.count(alive_bitset)
}
fn count_including_deleted(&mut self) -> u32 {
self.underlying.count_including_deleted()
}
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> Score {
self.underlying.score() * self.boost
}
}
#[cfg(test)]
mod tests {
use super::BoostQuery;
use crate::query::{AllQuery, Query};
use crate::schema::Schema;
use crate::{DocAddress, Document, Index};
#[test]
fn test_boost_query_explain() -> crate::Result<()> {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::new())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query = BoostQuery::new(Box::new(AllQuery), 0.2);
let explanation = query.explain(&searcher, DocAddress::new(0, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}"
);
Ok(())
}
}
| {
self.underlying.doc()
} | identifier_body |
boost_query.rs | use crate::fastfield::AliveBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
use std::collections::BTreeMap;
use std::fmt;
/// `BoostQuery` is a wrapper over a query used to boost its score.
///
/// The document set matched by the `BoostQuery` is strictly the same as the underlying query.
/// The score of each document, is the score of the underlying query multiplied by the `boost`
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: Score,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
BoostQuery { query, boost }
}
}
impl Clone for BoostQuery {
fn clone(&self) -> Self {
BoostQuery {
query: self.query.box_clone(),
boost: self.boost,
}
}
}
impl fmt::Debug for BoostQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Boost(query={:?}, boost={})", self.query, self.boost)
}
}
impl Query for BoostQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let weight_without_boost = self.query.weight(searcher, scoring_enabled)?;
let boosted_weight = if scoring_enabled {
Box::new(BoostWeight::new(weight_without_boost, self.boost))
} else {
weight_without_boost
};
Ok(boosted_weight)
}
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
self.query.query_terms(terms)
}
}
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: Score,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let mut explanation =
Explanation::new(format!("Boost x{} of ...", self.boost), scorer.score());
let underlying_explanation = self.weight.explain(reader, doc)?;
explanation.add_detail(underlying_explanation);
Ok(explanation)
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: Score,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
impl<S: Scorer> DocSet for BoostScorer<S> {
fn | (&mut self) -> DocId {
self.underlying.advance()
}
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.underlying.fill_buffer(buffer)
}
fn doc(&self) -> u32 {
self.underlying.doc()
}
fn size_hint(&self) -> u32 {
self.underlying.size_hint()
}
fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
self.underlying.count(alive_bitset)
}
fn count_including_deleted(&mut self) -> u32 {
self.underlying.count_including_deleted()
}
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> Score {
self.underlying.score() * self.boost
}
}
#[cfg(test)]
mod tests {
use super::BoostQuery;
use crate::query::{AllQuery, Query};
use crate::schema::Schema;
use crate::{DocAddress, Document, Index};
#[test]
fn test_boost_query_explain() -> crate::Result<()> {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::new())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query = BoostQuery::new(Box::new(AllQuery), 0.2);
let explanation = query.explain(&searcher, DocAddress::new(0, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}"
);
Ok(())
}
}
| advance | identifier_name |
tab_group_test.ts | // Copyright 2020 The Chromium Authors. All rights reserved. |
import {TabGroupElement} from 'chrome://tab-strip.top-chrome/tab_group.js';
import {TabsApiProxyImpl} from 'chrome://tab-strip.top-chrome/tabs_api_proxy.js';
import {assertEquals} from 'chrome://webui-test/chai_assert.js';
import {TestTabsApiProxy} from './test_tabs_api_proxy.js';
suite('TabGroup', () => {
const groupId = 'my-group-id';
let tabGroupElement: TabGroupElement;
let testTabsApiProxy: TestTabsApiProxy;
setup(() => {
testTabsApiProxy = new TestTabsApiProxy();
TabsApiProxyImpl.setInstance(testTabsApiProxy);
document.body.innerHTML = '';
tabGroupElement = document.createElement('tabstrip-tab-group');
tabGroupElement.dataset['groupId'] = groupId;
tabGroupElement.appendChild(document.createElement('tabstrip-tab'));
document.body.appendChild(tabGroupElement);
});
test('UpdatesVisuals', () => {
const visuals = {
color: '255, 0, 0',
textColor: '0, 0, 255',
title: 'My new title',
};
tabGroupElement.updateVisuals(visuals);
assertEquals(
visuals.title,
tabGroupElement.shadowRoot!.querySelector<HTMLElement>(
'#title')!.innerText);
assertEquals(
visuals.color,
tabGroupElement.style.getPropertyValue(
'--tabstrip-tab-group-color-rgb'));
assertEquals(
visuals.textColor,
tabGroupElement.style.getPropertyValue(
'--tabstrip-tab-group-text-color-rgb'));
});
test('DraggableChipStaysInPlace', () => {
const chip = tabGroupElement.$('#chip') as HTMLElement;
const originalChipRect = chip.getBoundingClientRect();
tabGroupElement.setDragging(true);
const newChipRect = chip.getBoundingClientRect();
assertEquals(originalChipRect.left, newChipRect.left);
assertEquals(originalChipRect.top, newChipRect.top);
assertEquals(originalChipRect.right, newChipRect.right);
assertEquals(originalChipRect.bottom, newChipRect.bottom);
});
test('DraggableChipStaysInPlaceInRTL', () => {
document.documentElement.dir = 'rtl';
const chip = tabGroupElement.$('#chip') as HTMLElement;
const originalChipRect = chip.getBoundingClientRect();
tabGroupElement.setDragging(true);
const newChipRect = chip.getBoundingClientRect();
assertEquals(originalChipRect.left, newChipRect.left);
assertEquals(originalChipRect.top, newChipRect.top);
assertEquals(originalChipRect.right, newChipRect.right);
assertEquals(originalChipRect.bottom, newChipRect.bottom);
});
test('ChipOpensEditDialog', async () => {
const chip = tabGroupElement.$('#chip') as HTMLElement;
const chipRect = chip.getBoundingClientRect();
chip.click();
const [calledGroupId, locationX, locationY, width, height] =
await testTabsApiProxy.whenCalled('showEditDialogForGroup');
assertEquals(groupId, calledGroupId);
assertEquals(chipRect.left, locationX);
assertEquals(chipRect.top, locationY);
assertEquals(chipRect.width, width);
assertEquals(chipRect.height, height);
});
}); | // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://tab-strip.top-chrome/tab.js';
import 'chrome://tab-strip.top-chrome/tab_group.js'; | random_line_split |
comparator.rs | #[cfg(test)]
mod comparator {
use libc::c_char;
use utils::{tmpdir, db_put_simple};
use leveldb::database::{Database};
use leveldb::iterator::Iterable;
use leveldb::options::{Options,ReadOptions};
use leveldb::comparator::{Comparator,OrdComparator};
use std::cmp::Ordering;
struct ReverseComparator {}
impl Comparator for ReverseComparator { | }
fn compare(&self, a: &[u8], b: &[u8]) -> Ordering {
b.cmp(a)
}
}
#[test]
fn test_comparator() {
let comparator: ReverseComparator = ReverseComparator {};
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("reverse_comparator");
let database = &mut Database::open_with_comparator(tmp.path(), opts, comparator).unwrap();
db_put_simple(database, b"1", &[1]);
db_put_simple(database, b"2", &[2]);
let read_opts = ReadOptions::new();
let mut iter = database.iter(read_opts);
assert_eq!((b"2".to_vec().as_slice(), vec![2]), iter.next().unwrap());
assert_eq!((b"1".to_vec().as_slice(), vec![1]), iter.next().unwrap());
}
#[test]
fn test_ord_comparator() {
let comparator: OrdComparator = OrdComparator::new("foo");
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("ord_comparator");
let database = &mut Database::open_with_comparator(tmp.path(), opts, comparator).unwrap();
db_put_simple(database, b"1", &[1]);
db_put_simple(database, b"2", &[2]);
let read_opts = ReadOptions::new();
let mut iter = database.iter(read_opts);
assert_eq!((b"1".to_vec().as_slice(), vec![1]), iter.next().unwrap());
assert_eq!((b"2".to_vec().as_slice(), vec![2]), iter.next().unwrap());
}
} |
fn name(&self) -> *const c_char {
"reverse".as_ptr() as *const c_char | random_line_split |
comparator.rs | #[cfg(test)]
mod comparator {
use libc::c_char;
use utils::{tmpdir, db_put_simple};
use leveldb::database::{Database};
use leveldb::iterator::Iterable;
use leveldb::options::{Options,ReadOptions};
use leveldb::comparator::{Comparator,OrdComparator};
use std::cmp::Ordering;
struct ReverseComparator {}
impl Comparator for ReverseComparator {
fn name(&self) -> *const c_char {
"reverse".as_ptr() as *const c_char
}
fn compare(&self, a: &[u8], b: &[u8]) -> Ordering {
b.cmp(a)
}
}
#[test]
fn | () {
let comparator: ReverseComparator = ReverseComparator {};
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("reverse_comparator");
let database = &mut Database::open_with_comparator(tmp.path(), opts, comparator).unwrap();
db_put_simple(database, b"1", &[1]);
db_put_simple(database, b"2", &[2]);
let read_opts = ReadOptions::new();
let mut iter = database.iter(read_opts);
assert_eq!((b"2".to_vec().as_slice(), vec![2]), iter.next().unwrap());
assert_eq!((b"1".to_vec().as_slice(), vec![1]), iter.next().unwrap());
}
#[test]
fn test_ord_comparator() {
let comparator: OrdComparator = OrdComparator::new("foo");
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("ord_comparator");
let database = &mut Database::open_with_comparator(tmp.path(), opts, comparator).unwrap();
db_put_simple(database, b"1", &[1]);
db_put_simple(database, b"2", &[2]);
let read_opts = ReadOptions::new();
let mut iter = database.iter(read_opts);
assert_eq!((b"1".to_vec().as_slice(), vec![1]), iter.next().unwrap());
assert_eq!((b"2".to_vec().as_slice(), vec![2]), iter.next().unwrap());
}
}
| test_comparator | identifier_name |
PaginationDropdown.tsx | import { DropdownItem, DropdownMenu, DropdownToggle, UncontrolledDropdown } from 'reactstrap';
interface PaginationDropdownProps {
ranges: number[];
value: number;
setValue: (newValue: number) => void;
toggleClassName?: string;
}
const PaginationDropdown = ({ toggleClassName, ranges, value, setValue }: PaginationDropdownProps) => (
<UncontrolledDropdown>
<DropdownToggle caret color="link" className={toggleClassName}> | {ranges.map((itemsPerPage) => (
<DropdownItem key={itemsPerPage} active={itemsPerPage === value} onClick={() => setValue(itemsPerPage)}>
<b>{itemsPerPage}</b> items per page
</DropdownItem>
))}
<DropdownItem divider />
<DropdownItem disabled={value === Infinity} onClick={() => setValue(Infinity)}>
<i>Clear pagination</i>
</DropdownItem>
</DropdownMenu>
</UncontrolledDropdown>
);
export default PaginationDropdown; | Paginate
</DropdownToggle>
<DropdownMenu right> | random_line_split |
manticore_protocol_spdm_GetVersion__req_to_wire.rs | // Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details. | // !! DO NOT EDIT !!
// To regenerate this file, run `fuzz/generate_proto_tests.py`.
#![no_main]
#![allow(non_snake_case)]
use libfuzzer_sys::fuzz_target;
use manticore::protocol::Command;
use manticore::protocol::wire::ToWire;
use manticore::protocol::borrowed::AsStatic;
use manticore::protocol::borrowed::Borrowed;
use manticore::protocol::spdm::GetVersion as C;
type Req<'a> = <C as Command<'a>>::Req;
fuzz_target!(|data: AsStatic<'static, Req<'static>>| {
let mut out = [0u8; 1024];
let _ = Req::borrow(&data).to_wire(&mut &mut out[..]);
}); | // SPDX-License-Identifier: Apache-2.0
| random_line_split |
ArrayUtils.ts | class ArrayUtils {
static CASEINSENSITIVE = 1;
static DESCENDING = 2;
static UNIQUESORT = 4;
static RETURNINDEXEDARRAY = 8;
static NUMERIC = 16;
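    // The flags above mirror ActionScript's Array.sortOn() options and can be
    // combined with the bitwise OR operator. Illustrative call only ('users'
    // and its 'name' field are assumed example data, not part of this class):
    //   ArrayUtils.sortOn( users, ['name'], ArrayUtils.CASEINSENSITIVE | ArrayUtils.DESCENDING );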
/**
* Checks if an array contains a specific value
*/
public static inArray( array:any[], value:any ):boolean{
return (array.indexOf( value ) != -1);
}
/**
* Checks if an element in the array has a field with a specific value
*/
public static inArrayField( array:any[], field:string, value:any ):boolean{
for( var i = 0; i < array.length; i++ ){
if( array[i][field] == value ) return true;
}
return false;
}
/**
     * Get a random element from the array
*/
public static randomElement( array:any[] ):any{
if( array.length > 0 ){
return array[Math.floor( Math.random() * array.length )];
}
return null;
}
/**
     * Shuffles an array in place (random sort, using the Fisher-Yates algorithm)
*/
public static shuffle( array:any[] ):void{
var i:number = array.length;
if( i == 0 ){
return;
}
var j:number;
var temp:any;
while( --i ) {
j = Math.floor( Math.random() * (i + 1) );
temp = array[i];
array[i] = array[j];
array[j] = temp;
}
}
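    // Usage sketch (illustrative values only): shuffle mutates the array in
    // place, so clone first if the original order must be preserved.
    //   var deck:number[] = [1, 2, 3, 4, 5];
    //   var original:number[] = ArrayUtils.clone( deck );
    //   ArrayUtils.shuffle( deck ); // deck is now randomly reordered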
/**
     * Copies the source array to the target array, without replacing the target's reference
*/
public static copy( array:any[], target:any[] ):void{
var leni:number = target.length = array.length;
for( var i:number = 0; i < leni; i++ ){
target[i] = array[i];
}
}
/**
* recursively clone an Array and it's sub-Array's (doesn't clone content objects)
*/
public static deepArrayClone( array:any[] ):any[]{
var ret:any[] = array.concat();
var iLim:number = ret.length;
var i:number;
for( i = 0; i < iLim; i++ ){
if( ret[i] instanceof Array ){
ret[i] = ArrayUtils.deepArrayClone( ret[i] );
}
}
return ret;
}
/**
* Calculates the average value of all elements in an array
     * Works only for arrays with numeric values; returns NaN for null or empty input
*/
public static average( array:any[] ):number{
if( array == null || array.length == 0 ) return NaN;
var total:number = 0;
for( var i = 0; i < array.length; i++ ){
total += array[i];
}
return total / array.length;
}
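    // Usage sketch (illustrative values only): the result is NaN for null or
    // empty input, so guard with isNaN before using it.
    //   var avg:number = ArrayUtils.average( [2, 4, 6] ); // 4
    //   if( !isNaN( avg ) ) console.log( avg );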
/**
* Remove all instances of the specified value from the array,
* @param array The array from which the value will be removed
* @param value The item that will be removed from the array.
*
* @return the number of removed items
*/
public static removeValueFromArray( array:any[], value:any ):number{
var total:number = 0;
for( var i:number = array.length - 1; i > -1; i-- ){
if( array[i] === value ){
array.splice( i, 1 );
total++;
}
}
return total;
}
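    // Usage sketch (illustrative values only): matching uses strict equality,
    // so objects are only removed when the exact same reference is passed.
    //   var list:any[] = [1, 2, 1, 3, 1];
    //   var removed:number = ArrayUtils.removeValueFromArray( list, 1 );
    //   // list is now [2, 3], removed is 3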
/**
* Removes a single (first occurring) value from an Array.
* @param array The array from which the value will be removed
* @param value The item that will be removed from the array.
*
* @return a boolean which indicates if a value is removed
*/
public static removeValueFromArrayOnce( array:any[], value:any ):boolean{
var len:number = array.length;
        for( var i:number = 0; i < len; i++ ){
if( array[i] === value ){
array.splice( i, 1 );
return true;
}
}
return false;
}
/**
* Create a new array that only contains unique instances of objects
* in the specified array.
*
* <p>Basically, this can be used to remove duplication object instances
* from an array</p>
*
* @param array The array which contains the values that will be used to
* create the new array that contains no duplicate values.
*
* @return A new array which only contains unique items from the specified
* array.
*/
public static createUniqueCopy( array:any[] ):any[]{
var newArray:any[] = [];
var len:number = array.length;
var item:any;
for( var i:number = 0; i < len; ++i ){
item = array[i];
if( ArrayUtils.inArray( newArray, item ) ){
continue;
}
newArray.push( item );
}
return newArray;
}
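    // Usage sketch (illustrative values only): the first occurrence of each
    // value is kept; object uniqueness is by reference, not by content.
    //   ArrayUtils.createUniqueCopy( ['a', 'b', 'a'] ); // ['a', 'b']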
/**
* Creates a copy of the specified array.
*
* <p>Note that the array returned is a new array but the items within the
* array are not copies of the items in the original array (but rather
* references to the same items)</p>
*
* @param array The array that will be cloned.
*
* @return A new array which contains the same items as the array passed
* in.
*/
public static clone( array:any[] ):any[]{
return array.slice( 0, array.length );
}
/**
* Compares two arrays and returns a boolean indicating whether the arrays
* contain the same values at the same indexes.
*
* @param array1 The first array that will be compared to the second.
* @param array2 The second array that will be compared to the first.
*
* @return True if the arrays contains the same values at the same indexes.
* False if they do not.
*/
public static areEqual( array1:any[], array2:any[] ):boolean{
if( array1 == array2 ){
return true;
}
if( array1.length != array2.length ){
return false;
}
for( var i:number = array1.length - 1; i >= 0; --i ){
if( array1[i] != array2[i] ){
return false;
}
}
return true;
}
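    // Usage sketch (illustrative values only): comparison is shallow and
    // index-based, so nested arrays or objects are compared by reference.
    //   ArrayUtils.areEqual( [1, 2], [1, 2] ); // true
    //   ArrayUtils.areEqual( [[1]], [[1]] );   // false (different inner references)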
/**
     * Returns the number of non-empty (defined) items in an Array.
*/
public static filledLength( array:any[] ):number{
var length:number = 0;
var leni:number = array.length;
for( var i:number = 0; i < leni; i++ ){
if( array[i] != undefined ) length++;
}
return length;
}
/**
     * Returns the items from the first array that are not present in the second
*/
public static getUniqueFirst( array1:any[], array2:any[] ):any[]{
var ret:any[] = [];
for( var i:number = 0; i < array1.length; i++ ){
if( array2.indexOf( array1[i] ) == -1 ) ret.push( array1[i] );
}
return ret;
}
/**
* Returns the items that are in both arrays
*/
public static intersect( array1:any[], array2:any[] ):any[]{
var ret:any[] = [];
var i:number;
for( i = 0; i < array1.length; i++ ){
if( array2.indexOf( array1[i] ) != -1 ) ret.push( array1[i] );
}
for( i = 0; i < array2.length; i++ ){
if( array1.indexOf( array2[i] ) != -1 ) ret.push( array2[i] );
}
ret = ArrayUtils.createUniqueCopy( ret );
return ret;
}
/**
* Adds an element to an Array
* @param element the element to add
* @param amount number of times the element must be added
* @param array the array where the element is added to. If null, a new Array is created
*
* @return the array or the newly created array, with the element
*/
public static addElements( element:any, amount:number, array:any[] = null ):any[] |
/**
* Simply joins an Array to a String
*/
public static simpleJoin( array:any[], sort:boolean = true, pre:string = ' - ', post:string = '\n', empty:string = '(empty)' ):string{
if( !array ){
return '(null array)';
}
if( array.length == 0 ){
return empty;
}
if( sort ){
array = array.concat().sort();
}
return pre + array.join( post + pre ) + post;
}
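// Example: ArrayUtils.simpleJoin( ['b', 'a'] ) returns " - a\n - b\n"
// (elements are sorted by default; the input array itself is left untouched).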
/**
* Returns a new Array from an Array without the empty (null, '' or undefined) elements.
*/
public static removeEmptyElements( array:any[] ):any[]{
var results:any[] = [];
for( var i:number = 0; i < array.length; ++i ){
if( array[i] != '' && array[i] != null && array[i] != undefined ) results.push( array[i] );
}
return results;
}
/*
---
script: Array.sortOn.js
description: Adds Array.sortOn function and related constants that works like in ActionScript for sorting arrays of objects (applying all same strict rules)
license: MIT-style license.
authors:
- gonchuki
github: https://github.com/gonchuki/mootools-Array.sortOn/blob/master/Source/Array.sortOn.js
docs: http://www.adobe.com/livedocs/flash/9.0/ActionScriptLangRefV3/Array.html#sortOn()
requires:
- core/1.2.4: [Array]
provides:
- [sortOn, CASEINSENSITIVE, DESCENDING, UNIQUESORT, RETURNINDEXEDARRAY, NUMERIC]
...
*/
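// Usage sketch with hypothetical data (assumes the MooTools helpers this port
// relies on, String.toFloat and Array.combine, are loaded):
// var users = [ { name: 'Bea', age: '30' }, { name: 'al', age: '4' } ];
// ArrayUtils.sortOn( users, ['age'], [ArrayUtils.NUMERIC] );
// ArrayUtils.sortOn( users, ['name'], [ArrayUtils.CASEINSENSITIVE | ArrayUtils.DESCENDING] );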
public static sortOn(array:any[], fields, options?:any):any
{
var dup_fn = function(field, field_options) {
var filtered = (field_options & ArrayUtils.NUMERIC)
? array.map(function(item) {return item[field].toFloat(); })
: (field_options & ArrayUtils.CASEINSENSITIVE)
? array.map(function(item) {return item[field].toLowerCase(); })
: array.map(function(item) {return item[field]; });
return filtered.length !== []['combine'](filtered).length;
};
var sort_fn = function(item_a, item_b, fields, options) {
return (function sort_by(fields, options) {
var ret, a, b,
opts = options[0],
sub_fields = fields[0].match(/[^.]+/g);
(function get_values(s_fields, s_a, s_b) {
var field = s_fields[0];
if (s_fields.length > 1) {
get_values(s_fields.slice(1), s_a[field], s_b[field]);
} else {
a = s_a[field].toString();
b = s_b[field].toString();
}
})(sub_fields, item_a, item_b);
if (opts & ArrayUtils.NUMERIC) {
ret = (a.toFloat() - b.toFloat());
} else {
if (opts & ArrayUtils.CASEINSENSITIVE) { a = a.toLowerCase(); b = b.toLowerCase(); }
ret = (a > b) ? 1 : (a < b) ? -1 : 0;
}
if ((ret === 0) && (fields.length > 1)) {
ret = sort_by(fields.slice(1), options.slice(1));
} else if (opts & ArrayUtils.DESCENDING) {
ret *= -1;
}
return ret;
})(fields, options);
};
fields = Array['from'](fields);
options = Array['from'](options);
if (options.length !== fields.length) options = [];
if ((options[0] & ArrayUtils.UNIQUESORT) && (fields.some(function(field, i){return dup_fn(field, options[i]);}))) return 0;
var curry_sort = function(item_a, item_b) {
return sort_fn(item_a, item_b, fields, options);
};
if (options[0] & ArrayUtils.RETURNINDEXEDARRAY)
{
return array.concat().sort(curry_sort);
}
else
{
return array.sort(curry_sort);
}
}
}
export = ArrayUtils;
| {
if( !array ) array = [];
for( var i:number = 0; i < amount; i++ ){
array.push( element );
}
return array;
} | identifier_body |
ArrayUtils.ts | class ArrayUtils {
static CASEINSENSITIVE = 1;
static DESCENDING = 2;
static UNIQUESORT = 4;
static RETURNINDEXEDARRAY = 8;
static NUMERIC = 16;
/**
* Checks if an array contains a specific value
*/
public static inArray( array:any[], value:any ):boolean{
return (array.indexOf( value ) != -1);
}
/**
* Checks if an element in the array has a field with a specific value
*/
public static inArrayField( array:any[], field:string, value:any ):boolean{
for( var i = 0; i < array.length; i++ ){
if( array[i][field] == value ) return true;
}
return false;
}
/**
* Get a random element from the array
*/
public static randomElement( array:any[] ):any{
if( array.length > 0 ){
return array[Math.floor( Math.random() * array.length )];
}
return null;
}
/**
* Shuffles an array (random order)
*/
public static shuffle( array:any[] ):void{
var i:number = array.length;
if( i == 0 ){
return;
}
var j:number;
var temp:any;
while( --i ) {
j = Math.floor( Math.random() * (i + 1) );
temp = array[i];
array[i] = array[j];
array[j] = temp;
}
}
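// Note: shuffle() above is the Fisher-Yates algorithm: i walks down from the last
// index and each element is swapped with a uniformly random index j in [0, i],
// which gives every permutation equal probability; index 0 needs no final swap.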
/**
* copies the source array to the target array, without removing the reference
*/
public static copy( array:any[], target:any[] ):void{
var leni:number = target.length = array.length;
for( var i:number = 0; i < leni; i++ ){
target[i] = array[i];
}
}
/**
* recursively clone an Array and its sub-Arrays (doesn't clone content objects)
*/
public static deepArrayClone( array:any[] ):any[]{
var ret:any[] = array.concat();
var iLim:number = ret.length;
var i:number;
for( i = 0; i < iLim; i++ ){
if( ret[i] instanceof Array ){
ret[i] = ArrayUtils.deepArrayClone( ret[i] );
}
}
return ret;
}
/**
* Calculates the average value of all elements in an array
* Works only for arrays with numeric values
*/
public static average( array:any[] ):number{
if( array == null || array.length == 0 ) return NaN;
var total:number = 0;
for( var i = 0; i < array.length; i++ ){
total += array[i];
}
return total / array.length;
}
/**
* Remove all instances of the specified value from the array.
* @param array The array from which the value will be removed
* @param value The item that will be removed from the array.
*
* @return the number of removed items
*/
public static removeValueFromArray( array:any[], value:any ):number{
var total:number = 0;
for( var i:number = array.length - 1; i > -1; i-- ){
if( array[i] === value ){
array.splice( i, 1 );
total++;
}
}
return total;
}
/**
* Removes a single (first occurring) value from an Array.
* @param array The array from which the value will be removed
* @param value The item that will be removed from the array.
*
* @return a boolean which indicates if a value is removed
*/
public static | ( array:any[], value:any ):boolean{
var len:number = array.length;
for( var i:number = len - 1; i > -1; i-- ){
if( array[i] === value ){
array.splice( i, 1 );
return true;
}
}
return false;
}
/**
* Create a new array that only contains unique instances of objects
* in the specified array.
*
* <p>Basically, this can be used to remove duplicate object instances
* from an array</p>
*
* @param array The array which contains the values that will be used to
* create the new array that contains no duplicate values.
*
* @return A new array which only contains unique items from the specified
* array.
*/
public static createUniqueCopy( array:any[] ):any[]{
var newArray:any[] = [];
var len:number = array.length;
var item:any;
for( var i:number = 0; i < len; ++i ){
item = array[i];
if( ArrayUtils.inArray( newArray, item ) ){
continue;
}
newArray.push( item );
}
return newArray;
}
/**
* Creates a copy of the specified array.
*
* <p>Note that the array returned is a new array but the items within the
* array are not copies of the items in the original array (but rather
* references to the same items)</p>
*
* @param array The array that will be cloned.
*
* @return A new array which contains the same items as the array passed
* in.
*/
public static clone( array:any[] ):any[]{
return array.slice( 0, array.length );
}
/**
* Compares two arrays and returns a boolean indicating whether the arrays
* contain the same values at the same indexes.
*
* @param array1 The first array that will be compared to the second.
* @param array2 The second array that will be compared to the first.
*
* @return True if the arrays contain the same values at the same indexes.
* False if they do not.
*/
public static areEqual( array1:any[], array2:any[] ):boolean{
if( array1 == array2 ){
return true;
}
if( array1.length != array2.length ){
return false;
}
for( var i:number = array1.length - 1; i >= 0; --i ){
if( array1[i] != array2[i] ){
return false;
}
}
return true;
}
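// Note: areEqual() tolerates null only when both arguments are null (caught by
// the == comparison above); passing exactly one null array throws on .length.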
/**
* Returns the number of (non-empty) items in an Array.
*/
public static filledLength( array:any[] ):number{
var length:number = 0;
var leni:number = array.length;
for( var i:number = 0; i < leni; i++ ){
if( array[i] != undefined ) length++;
}
return length;
}
/**
* Returns the items that are unique in the first array
*/
public static getUniqueFirst( array1:any[], array2:any[] ):any[]{
var ret:any[] = [];
for( var i:number = 0; i < array1.length; i++ ){
if( array2.indexOf( array1[i] ) == -1 ) ret.push( array1[i] );
}
return ret;
}
/**
* Returns the items that are in both arrays
*/
public static intersect( array1:any[], array2:any[] ):any[]{
var ret:any[] = [];
var i:number;
for( i = 0; i < array1.length; i++ ){
if( array2.indexOf( array1[i] ) != -1 ) ret.push( array1[i] );
}
for( i = 0; i < array2.length; i++ ){
if( array1.indexOf( array2[i] ) != -1 ) ret.push( array2[i] );
}
ret = ArrayUtils.createUniqueCopy( ret );
return ret;
}
/**
* Adds an element to an Array
* @param element the element to add
* @param amount number of times the element must be added
* @param array the array where the element is added to. If null, a new Array is created
*
* @return the array or the newly created array, with the element
*/
public static addElements( element:any, amount:number, array:any[] = null ):any[]{
if( !array ) array = [];
for( var i:number = 0; i < amount; i++ ){
array.push( element );
}
return array;
}
/**
* Simply joins an Array to a String
*/
public static simpleJoin( array:any[], sort:boolean = true, pre:string = ' - ', post:string = '\n', empty:string = '(empty)' ):string{
if( !array ){
return '(null array)';
}
if( array.length == 0 ){
return empty;
}
if( sort ){
array = array.concat().sort();
}
return pre + array.join( post + pre ) + post;
}
/**
* Returns a new Array from an Array without the empty (null, '' or undefined) elements.
*/
public static removeEmptyElements( array:any[] ):any[]{
var results:any[] = [];
for( var i:number = 0; i < array.length; ++i ){
if( array[i] != '' && array[i] != null && array[i] != undefined ) results.push( array[i] );
}
return results;
}
/*
---
script: Array.sortOn.js
description: Adds Array.sortOn function and related constants that works like in ActionScript for sorting arrays of objects (applying all same strict rules)
license: MIT-style license.
authors:
- gonchuki
github: https://github.com/gonchuki/mootools-Array.sortOn/blob/master/Source/Array.sortOn.js
docs: http://www.adobe.com/livedocs/flash/9.0/ActionScriptLangRefV3/Array.html#sortOn()
requires:
- core/1.2.4: [Array]
provides:
- [sortOn, CASEINSENSITIVE, DESCENDING, UNIQUESORT, RETURNINDEXEDARRAY, NUMERIC]
...
*/
public static sortOn(array:any[], fields, options?:any):any
{
var dup_fn = function(field, field_options) {
var filtered = (field_options & ArrayUtils.NUMERIC)
? array.map(function(item) {return item[field].toFloat(); })
: (field_options & ArrayUtils.CASEINSENSITIVE)
? array.map(function(item) {return item[field].toLowerCase(); })
: array.map(function(item) {return item[field]; });
return filtered.length !== []['combine'](filtered).length;
};
var sort_fn = function(item_a, item_b, fields, options) {
return (function sort_by(fields, options) {
var ret, a, b,
opts = options[0],
sub_fields = fields[0].match(/[^.]+/g);
(function get_values(s_fields, s_a, s_b) {
var field = s_fields[0];
if (s_fields.length > 1) {
get_values(s_fields.slice(1), s_a[field], s_b[field]);
} else {
a = s_a[field].toString();
b = s_b[field].toString();
}
})(sub_fields, item_a, item_b);
if (opts & ArrayUtils.NUMERIC) {
ret = (a.toFloat() - b.toFloat());
} else {
if (opts & ArrayUtils.CASEINSENSITIVE) { a = a.toLowerCase(); b = b.toLowerCase(); }
ret = (a > b) ? 1 : (a < b) ? -1 : 0;
}
if ((ret === 0) && (fields.length > 1)) {
ret = sort_by(fields.slice(1), options.slice(1));
} else if (opts & ArrayUtils.DESCENDING) {
ret *= -1;
}
return ret;
})(fields, options);
};
fields = Array['from'](fields);
options = Array['from'](options);
if (options.length !== fields.length) options = [];
if ((options[0] & ArrayUtils.UNIQUESORT) && (fields.some(function(field, i){return dup_fn(field, options[i]);}))) return 0;
var curry_sort = function(item_a, item_b) {
return sort_fn(item_a, item_b, fields, options);
};
if (options[0] & ArrayUtils.RETURNINDEXEDARRAY)
{
return array.concat().sort(curry_sort);
}
else
{
return array.sort(curry_sort);
}
}
}
export = ArrayUtils;
| removeValueFromArrayOnce | identifier_name |
ArrayUtils.ts | class ArrayUtils {
static CASEINSENSITIVE = 1;
static DESCENDING = 2;
static UNIQUESORT = 4;
static RETURNINDEXEDARRAY = 8;
static NUMERIC = 16;
/**
* Checks if an array contains a specific value
*/
public static inArray( array:any[], value:any ):boolean{
return (array.indexOf( value ) != -1);
}
/**
* Checks if an element in the array has a field with a specific value
*/
public static inArrayField( array:any[], field:string, value:any ):boolean{
for( var i = 0; i < array.length; i++ ){
if( array[i][field] == value ) return true;
}
return false;
}
/**
* Get a random element from the array
*/
public static randomElement( array:any[] ):any{
if( array.length > 0 ){
return array[Math.floor( Math.random() * array.length )];
}
return null;
}
/**
* Shuffles an array (random order)
*/
public static shuffle( array:any[] ):void{
var i:number = array.length;
if( i == 0 ){
return;
}
var j:number;
var temp:any;
while( --i ) {
j = Math.floor( Math.random() * (i + 1) );
temp = array[i];
array[i] = array[j];
array[j] = temp;
}
}
/**
* copies the source array to the target array, without removing the reference
*/
public static copy( array:any[], target:any[] ):void{
var leni:number = target.length = array.length;
for( var i:number = 0; i < leni; i++ ){
target[i] = array[i];
}
}
/**
* recursively clone an Array and its sub-Arrays (doesn't clone content objects)
*/
public static deepArrayClone( array:any[] ):any[]{
var ret:any[] = array.concat();
var iLim:number = ret.length;
var i:number;
for( i = 0; i < iLim; i++ ){
if( ret[i] instanceof Array ){
ret[i] = ArrayUtils.deepArrayClone( ret[i] );
}
}
return ret;
}
/**
* Calculates the average value of all elements in an array
* Works only for arrays with numeric values
*/
public static average( array:any[] ):number{
if( array == null || array.length == 0 ) return NaN;
var total:number = 0;
for( var i = 0; i < array.length; i++ ){
total += array[i];
}
return total / array.length;
}
/**
* Remove all instances of the specified value from the array.
* @param array The array from which the value will be removed
* @param value The item that will be removed from the array.
*
* @return the number of removed items
*/
public static removeValueFromArray( array:any[], value:any ):number{
var total:number = 0;
for( var i:number = array.length - 1; i > -1; i-- ){
if( array[i] === value ){
array.splice( i, 1 );
total++;
}
}
return total;
}
/**
* Removes a single (first occurring) value from an Array.
* @param array The array from which the value will be removed
* @param value The item that will be removed from the array.
*
* @return a boolean which indicates if a value is removed
*/
public static removeValueFromArrayOnce( array:any[], value:any ):boolean{
var len:number = array.length;
for( var i:number = len - 1; i > -1; i-- ){
if( array[i] === value ){
array.splice( i, 1 );
return true;
}
}
return false;
}
/**
* Create a new array that only contains unique instances of objects
* in the specified array.
*
* <p>Basically, this can be used to remove duplicate object instances
* from an array</p>
*
* @param array The array which contains the values that will be used to
* create the new array that contains no duplicate values.
*
* @return A new array which only contains unique items from the specified
* array.
*/
public static createUniqueCopy( array:any[] ):any[]{
var newArray:any[] = [];
var len:number = array.length;
var item:any;
for( var i:number = 0; i < len; ++i ){
item = array[i];
if( ArrayUtils.inArray( newArray, item ) ){
continue;
}
newArray.push( item );
}
return newArray;
}
/**
* Creates a copy of the specified array.
*
* <p>Note that the array returned is a new array but the items within the
* array are not copies of the items in the original array (but rather
* references to the same items)</p>
*
* @param array The array that will be cloned.
*
* @return A new array which contains the same items as the array passed
* in.
*/
public static clone( array:any[] ):any[]{
return array.slice( 0, array.length );
}
/**
* Compares two arrays and returns a boolean indicating whether the arrays
* contain the same values at the same indexes.
*
* @param array1 The first array that will be compared to the second.
* @param array2 The second array that will be compared to the first.
*
* @return True if the arrays contain the same values at the same indexes.
* False if they do not.
*/
public static areEqual( array1:any[], array2:any[] ):boolean{
if( array1 == array2 ){
return true;
}
if( array1.length != array2.length ){
return false;
}
for( var i:number = array1.length - 1; i >= 0; --i ){
if( array1[i] != array2[i] ){
return false;
}
}
return true;
}
/**
* Returns the number of (non-empty) items in an Array.
*/
public static filledLength( array:any[] ):number{
var length:number = 0;
var leni:number = array.length;
for( var i:number = 0; i < leni; i++ ){
if( array[i] != undefined ) length++;
}
return length;
}
/**
* Returns the items that are unique in the first array | if( array2.indexOf( array1[i] ) == -1 ) ret.push( array1[i] );
}
return ret;
}
/**
* Returns the items that are in both arrays
*/
public static intersect( array1:any[], array2:any[] ):any[]{
var ret:any[] = [];
var i:number;
for( i = 0; i < array1.length; i++ ){
if( array2.indexOf( array1[i] ) != -1 ) ret.push( array1[i] );
}
for( i = 0; i < array2.length; i++ ){
if( array1.indexOf( array2[i] ) != -1 ) ret.push( array2[i] );
}
ret = ArrayUtils.createUniqueCopy( ret );
return ret;
}
/**
* Adds an element to an Array
* @param element the element to add
* @param amount number of times the element must be added
* @param array the array where the element is added to. If null, a new Array is created
*
* @return the array or the newly created array, with the element
*/
public static addElements( element:any, amount:number, array:any[] = null ):any[]{
if( !array ) array = [];
for( var i:number = 0; i < amount; i++ ){
array.push( element );
}
return array;
}
/**
* Simply joins an Array to a String
*/
public static simpleJoin( array:any[], sort:boolean = true, pre:string = ' - ', post:string = '\n', empty:string = '(empty)' ):string{
if( !array ){
return '(null array)';
}
if( array.length == 0 ){
return empty;
}
if( sort ){
array = array.concat().sort();
}
return pre + array.join( post + pre ) + post;
}
/**
* Returns a new Array from an Array without the empty (null, '' or undefined) elements.
*/
public static removeEmptyElements( array:any[] ):any[]{
var results:any[] = [];
for( var i:number = 0; i < array.length; ++i ){
if( array[i] != '' && array[i] != null && array[i] != undefined ) results.push( array[i] );
}
return results;
}
/*
---
script: Array.sortOn.js
description: Adds Array.sortOn function and related constants that works like in ActionScript for sorting arrays of objects (applying all same strict rules)
license: MIT-style license.
authors:
- gonchuki
github: https://github.com/gonchuki/mootools-Array.sortOn/blob/master/Source/Array.sortOn.js
docs: http://www.adobe.com/livedocs/flash/9.0/ActionScriptLangRefV3/Array.html#sortOn()
requires:
- core/1.2.4: [Array]
provides:
- [sortOn, CASEINSENSITIVE, DESCENDING, UNIQUESORT, RETURNINDEXEDARRAY, NUMERIC]
...
*/
public static sortOn(array:any[], fields, options?:any):any
{
var dup_fn = function(field, field_options) {
var filtered = (field_options & ArrayUtils.NUMERIC)
? array.map(function(item) {return item[field].toFloat(); })
: (field_options & ArrayUtils.CASEINSENSITIVE)
? array.map(function(item) {return item[field].toLowerCase(); })
: array.map(function(item) {return item[field]; });
return filtered.length !== []['combine'](filtered).length;
};
var sort_fn = function(item_a, item_b, fields, options) {
return (function sort_by(fields, options) {
var ret, a, b,
opts = options[0],
sub_fields = fields[0].match(/[^.]+/g);
(function get_values(s_fields, s_a, s_b) {
var field = s_fields[0];
if (s_fields.length > 1) {
get_values(s_fields.slice(1), s_a[field], s_b[field]);
} else {
a = s_a[field].toString();
b = s_b[field].toString();
}
})(sub_fields, item_a, item_b);
if (opts & ArrayUtils.NUMERIC) {
ret = (a.toFloat() - b.toFloat());
} else {
if (opts & ArrayUtils.CASEINSENSITIVE) { a = a.toLowerCase(); b = b.toLowerCase(); }
ret = (a > b) ? 1 : (a < b) ? -1 : 0;
}
if ((ret === 0) && (fields.length > 1)) {
ret = sort_by(fields.slice(1), options.slice(1));
} else if (opts & ArrayUtils.DESCENDING) {
ret *= -1;
}
return ret;
})(fields, options);
};
fields = Array['from'](fields);
options = Array['from'](options);
if (options.length !== fields.length) options = [];
if ((options[0] & ArrayUtils.UNIQUESORT) && (fields.some(function(field, i){return dup_fn(field, options[i]);}))) return 0;
var curry_sort = function(item_a, item_b) {
return sort_fn(item_a, item_b, fields, options);
};
if (options[0] & ArrayUtils.RETURNINDEXEDARRAY)
{
return array.concat().sort(curry_sort);
}
else
{
return array.sort(curry_sort);
}
}
}
export = ArrayUtils; | */
public static getUniqueFirst( array1:any[], array2:any[] ):any[]{
var ret:any[] = [];
for( var i:number = 0; i < array1.length; i++ ){ | random_line_split |
receiver.js | let connectionIdx = 0;
let messageIdx = 0;
function addConnection(connection) {
connection.connectionId = ++connectionIdx;
addMessage('New connection #' + connectionIdx);
connection.addEventListener('message', function(event) {
messageIdx++;
const data = JSON.parse(event.data);
const logString = 'Message ' + messageIdx + ' from connection #' +
connection.connectionId + ': ' + data.message;
addMessage(logString, data.lang);
maybeSetFruit(data.message);
connection.send('Received message ' + messageIdx);
});
connection.addEventListener('close', function(event) {
addMessage('Connection #' + connection.connectionId + ' closed, reason = ' +
event.reason + ', message = ' + event.message);
});
};
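// For context, a minimal sketch of the sending (controller) side -- an assumed
// separate page, not part of this receiver script:
// const request = new PresentationRequest(['receiver.html']);
// request.start().then(connection => {
// connection.send(JSON.stringify({message: 'banana', lang: 'en'}));
// });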
/* Utils */
const fruitEmoji = {
'grapes': '\u{1F347}',
'watermelon': '\u{1F349}',
'melon': '\u{1F348}',
'tangerine': '\u{1F34A}',
'lemon': '\u{1F34B}',
'banana': '\u{1F34C}',
'pineapple': '\u{1F34D}',
'green apple': '\u{1F35F}',
'apple': '\u{1F34E}',
'pear': '\u{1F350}',
'peach': '\u{1F351}',
'cherries': '\u{1F352}',
'strawberry': '\u{1F353}'
};
function addMessage(content, language) {
const listItem = document.createElement("li");
if (language) {
listItem.lang = language;
}
listItem.textContent = content;
document.querySelector("#message-list").appendChild(listItem);
};
function | (message) {
const fruit = message.toLowerCase();
if (fruit in fruitEmoji) {
document.querySelector('#main').textContent = fruitEmoji[fruit];
}
};
document.addEventListener('DOMContentLoaded', function() {
if (navigator.presentation.receiver) {
navigator.presentation.receiver.connectionList.then(list => {
list.connections.map(connection => addConnection(connection));
list.addEventListener('connectionavailable', function(event) {
addConnection(event.connection);
});
});
}
});
| maybeSetFruit | identifier_name |
receiver.js | let connectionIdx = 0;
let messageIdx = 0;
function addConnection(connection) {
connection.connectionId = ++connectionIdx;
addMessage('New connection #' + connectionIdx);
connection.addEventListener('message', function(event) {
messageIdx++;
const data = JSON.parse(event.data);
const logString = 'Message ' + messageIdx + ' from connection #' +
connection.connectionId + ': ' + data.message;
addMessage(logString, data.lang);
maybeSetFruit(data.message);
connection.send('Received message ' + messageIdx);
});
connection.addEventListener('close', function(event) {
addMessage('Connection #' + connection.connectionId + ' closed, reason = ' +
event.reason + ', message = ' + event.message);
});
};
/* Utils */
const fruitEmoji = {
'grapes': '\u{1F347}',
'watermelon': '\u{1F349}',
'melon': '\u{1F348}',
'tangerine': '\u{1F34A}',
'lemon': '\u{1F34B}',
'banana': '\u{1F34C}',
'pineapple': '\u{1F34D}',
'green apple': '\u{1F35F}',
'apple': '\u{1F34E}',
'pear': '\u{1F350}',
'peach': '\u{1F351}',
'cherries': '\u{1F352}',
'strawberry': '\u{1F353}'
};
function addMessage(content, language) {
const listItem = document.createElement("li");
if (language) {
listItem.lang = language;
}
listItem.textContent = content;
document.querySelector("#message-list").appendChild(listItem);
};
function maybeSetFruit(message) {
const fruit = message.toLowerCase();
if (fruit in fruitEmoji) {
document.querySelector('#main').textContent = fruitEmoji[fruit];
}
}; |
document.addEventListener('DOMContentLoaded', function() {
if (navigator.presentation.receiver) {
navigator.presentation.receiver.connectionList.then(list => {
list.connections.map(connection => addConnection(connection));
list.addEventListener('connectionavailable', function(event) {
addConnection(event.connection);
});
});
}
}); | random_line_split |
|
receiver.js | let connectionIdx = 0;
let messageIdx = 0;
function addConnection(connection) {
connection.connectionId = ++connectionIdx;
addMessage('New connection #' + connectionIdx);
connection.addEventListener('message', function(event) {
messageIdx++;
const data = JSON.parse(event.data);
const logString = 'Message ' + messageIdx + ' from connection #' +
connection.connectionId + ': ' + data.message;
addMessage(logString, data.lang);
maybeSetFruit(data.message);
connection.send('Received message ' + messageIdx);
});
connection.addEventListener('close', function(event) {
addMessage('Connection #' + connection.connectionId + ' closed, reason = ' +
event.reason + ', message = ' + event.message);
});
};
/* Utils */
const fruitEmoji = {
'grapes': '\u{1F347}',
'watermelon': '\u{1F349}',
'melon': '\u{1F348}',
'tangerine': '\u{1F34A}',
'lemon': '\u{1F34B}',
'banana': '\u{1F34C}',
'pineapple': '\u{1F34D}',
'green apple': '\u{1F35F}',
'apple': '\u{1F34E}',
'pear': '\u{1F350}',
'peach': '\u{1F351}',
'cherries': '\u{1F352}',
'strawberry': '\u{1F353}'
};
function addMessage(content, language) | ;
function maybeSetFruit(message) {
const fruit = message.toLowerCase();
if (fruit in fruitEmoji) {
document.querySelector('#main').textContent = fruitEmoji[fruit];
}
};
document.addEventListener('DOMContentLoaded', function() {
if (navigator.presentation.receiver) {
navigator.presentation.receiver.connectionList.then(list => {
list.connections.map(connection => addConnection(connection));
list.addEventListener('connectionavailable', function(event) {
addConnection(event.connection);
});
});
}
});
| {
const listItem = document.createElement("li");
if (language) {
listItem.lang = language;
}
listItem.textContent = content;
document.querySelector("#message-list").appendChild(listItem);
} | identifier_body |
receiver.js | let connectionIdx = 0;
let messageIdx = 0;
function addConnection(connection) {
connection.connectionId = ++connectionIdx;
addMessage('New connection #' + connectionIdx);
connection.addEventListener('message', function(event) {
messageIdx++;
const data = JSON.parse(event.data);
const logString = 'Message ' + messageIdx + ' from connection #' +
connection.connectionId + ': ' + data.message;
addMessage(logString, data.lang);
maybeSetFruit(data.message);
connection.send('Received message ' + messageIdx);
});
connection.addEventListener('close', function(event) {
addMessage('Connection #' + connection.connectionId + ' closed, reason = ' +
event.reason + ', message = ' + event.message);
});
};
/* Utils */
const fruitEmoji = {
'grapes': '\u{1F347}',
'watermelon': '\u{1F349}',
'melon': '\u{1F348}',
'tangerine': '\u{1F34A}',
'lemon': '\u{1F34B}',
'banana': '\u{1F34C}',
'pineapple': '\u{1F34D}',
'green apple': '\u{1F35F}',
'apple': '\u{1F34E}',
'pear': '\u{1F350}',
'peach': '\u{1F351}',
'cherries': '\u{1F352}',
'strawberry': '\u{1F353}'
};
function addMessage(content, language) {
const listItem = document.createElement("li");
if (language) {
listItem.lang = language;
}
listItem.textContent = content;
document.querySelector("#message-list").appendChild(listItem);
};
function maybeSetFruit(message) {
const fruit = message.toLowerCase();
if (fruit in fruitEmoji) |
};
document.addEventListener('DOMContentLoaded', function() {
if (navigator.presentation.receiver) {
navigator.presentation.receiver.connectionList.then(list => {
list.connections.map(connection => addConnection(connection));
list.addEventListener('connectionavailable', function(event) {
addConnection(event.connection);
});
});
}
});
| {
document.querySelector('#main').textContent = fruitEmoji[fruit];
} | conditional_block |
loader.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Finds crate binaries and loads their metadata
use back::archive::{ArchiveRO, METADATA_FILENAME};
use back::svh::Svh;
use driver::session::Session;
use lib::llvm::{False, llvm, ObjectFile, mk_section_iter};
use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive};
use metadata::decoder;
use metadata::encoder;
use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch};
use syntax::codemap::Span;
use syntax::diagnostic::SpanHandler;
use syntax::crateid::CrateId;
use syntax::attr::AttrMetaMethods;
use util::fs;
use std::c_str::ToCStr;
use std::cast;
use std::cmp;
use std::io;
use std::ptr;
use std::slice;
use std::str; |
use collections::{HashMap, HashSet};
use flate;
use time;
pub static MACOS_DLL_PREFIX: &'static str = "lib";
pub static MACOS_DLL_SUFFIX: &'static str = ".dylib";
pub static WIN32_DLL_PREFIX: &'static str = "";
pub static WIN32_DLL_SUFFIX: &'static str = ".dll";
pub static LINUX_DLL_PREFIX: &'static str = "lib";
pub static LINUX_DLL_SUFFIX: &'static str = ".so";
pub static FREEBSD_DLL_PREFIX: &'static str = "lib";
pub static FREEBSD_DLL_SUFFIX: &'static str = ".so";
pub static ANDROID_DLL_PREFIX: &'static str = "lib";
pub static ANDROID_DLL_SUFFIX: &'static str = ".so";
pub enum Os {
OsMacos,
OsWin32,
OsLinux,
OsAndroid,
OsFreebsd
}
pub struct CrateMismatch {
path: Path,
got: ~str,
}
pub struct Context<'a> {
pub sess: &'a Session,
pub span: Span,
pub ident: &'a str,
pub crate_id: &'a CrateId,
pub id_hash: &'a str,
pub hash: Option<&'a Svh>,
pub triple: &'a str,
pub os: Os,
pub filesearch: FileSearch<'a>,
pub root: &'a Option<CratePaths>,
pub rejected_via_hash: Vec<CrateMismatch>,
pub rejected_via_triple: Vec<CrateMismatch>,
}
pub struct Library {
pub dylib: Option<Path>,
pub rlib: Option<Path>,
pub metadata: MetadataBlob,
}
pub struct ArchiveMetadata {
archive: ArchiveRO,
// See comments in ArchiveMetadata::new for why this is static
data: &'static [u8],
}
pub struct CratePaths {
pub ident: ~str,
pub dylib: Option<Path>,
pub rlib: Option<Path>
}
impl CratePaths {
fn paths(&self) -> Vec<Path> {
match (&self.dylib, &self.rlib) {
(&None, &None) => vec!(),
(&Some(ref p), &None) |
(&None, &Some(ref p)) => vec!(p.clone()),
(&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()),
}
}
}
impl<'a> Context<'a> {
pub fn maybe_load_library_crate(&mut self) -> Option<Library> {
self.find_library_crate()
}
pub fn load_library_crate(&mut self) -> Library {
match self.find_library_crate() {
Some(t) => t,
None => {
self.report_load_errs();
unreachable!()
}
}
}
pub fn report_load_errs(&mut self) {
let message = if self.rejected_via_hash.len() > 0 {
format!("found possibly newer version of crate `{}`",
self.ident)
} else if self.rejected_via_triple.len() > 0 {
format!("found incorrect triple for crate `{}`", self.ident)
} else {
format!("can't find crate for `{}`", self.ident)
};
let message = match self.root {
&None => message,
&Some(ref r) => format!("{} which `{}` depends on",
message, r.ident)
};
self.sess.span_err(self.span, message);
let mismatches = self.rejected_via_triple.iter();
if self.rejected_via_triple.len() > 0 {
self.sess.span_note(self.span, format!("expected triple of {}", self.triple));
for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}, triple {}: {}",
self.ident, i+1, got, path.display()));
}
}
if self.rejected_via_hash.len() > 0 {
self.sess.span_note(self.span, "perhaps this crate needs \
to be recompiled?");
let mismatches = self.rejected_via_hash.iter();
for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
self.ident, i+1, path.display()));
}
match self.root {
&None => {}
&Some(ref r) => {
for (i, path) in r.paths().iter().enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
r.ident, i+1, path.display()));
}
}
}
}
self.sess.abort_if_errors();
}
fn find_library_crate(&mut self) -> Option<Library> {
let (dyprefix, dysuffix) = self.dylibname();
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}-", dyprefix, self.crate_id.name);
let rlib_prefix = format!("lib{}-", self.crate_id.name);
let mut candidates = HashMap::new();
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
// exact crate_id and possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
self.filesearch.search(|path| {
let file = match path.filename_str() {
None => return FileDoesntMatch,
Some(file) => file,
};
if file.starts_with(rlib_prefix) && file.ends_with(".rlib") {
info!("rlib candidate: {}", path.display());
match self.try_match(file, rlib_prefix, ".rlib") {
Some(hash) => {
info!("rlib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (ref mut rlibs, _) = *slot;
rlibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("rlib rejected");
FileDoesntMatch
}
}
} else if file.starts_with(dylib_prefix) && file.ends_with(dysuffix){
info!("dylib candidate: {}", path.display());
match self.try_match(file, dylib_prefix, dysuffix) {
Some(hash) => {
info!("dylib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
// keyed off the filename hash listed. For each filename, we also have a
// list of rlibs/dylibs that apply. Here, we map each of these lists
// (per hash), to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name));
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<~str>{
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice() != vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_owned())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_owned())
} else {
None
}
}
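// Illustration with a hypothetical filename (not taken from the source): for
// file = "libfoo-a1b2c3d4-0.1.rlib" with prefix = "libfoo-" and suffix = ".rlib",
// `middle` is "a1b2c3d4-0.1", so `hash` is "a1b2c3d4" and `vers` is "0.1".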
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found", flavor, self.crate_id.name));
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref().display()));
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()));
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
Some(hash) => hash, None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice() != self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch{ path: libpath.clone(),
got: triple.to_owned() });
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash != hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch{ path: libpath.clone(),
got: myhash.as_str().to_owned() });
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()));
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
None => {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
return None;
}
};
// This data is actually a pointer inside of the archive itself, but
// we essentially want to cache it because the lookup inside the
// archive is a fairly expensive operation (and it's queried for
// *very* frequently). For this reason, we transmute it to the
// static lifetime to put into the struct. Note that the buffer is
// never actually handed out with a static lifetime, but rather the
// buffer is loaned with the lifetime of this containing object.
// Hence, we're guaranteed that the buffer will never be used after
// this object is dead, so this is a safe operation to transmute and
// store the data as a static buffer.
unsafe { cast::transmute(data) }
};
Some(ArchiveMetadata {
archive: ar,
data: data,
})
}
pub fn as_slice<'a>(&'a self) -> &'a [u8] { self.data }
}
// Just a small wrapper to time how long reading metadata takes.
fn get_metadata_section(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
let start = time::precise_time_ns();
let ret = get_metadata_section_imp(os, filename);
info!("reading {} => {}ms", filename.filename_display(),
(time::precise_time_ns() - start) / 1000000);
return ret;
}
fn get_metadata_section_imp(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
if !filename.exists() {
return Err(format!("no such file: '{}'", filename.display()));
}
if filename.filename_str().unwrap().ends_with(".rlib") {
// Use ArchiveRO for speed here, it's backed by LLVM and uses mmap
// internally to read the file. We also avoid even using a memcpy by
// just keeping the archive along while the metadata is in use.
let archive = match ArchiveRO::open(filename) {
Some(ar) => ar,
None => {
debug!("llvm didn't like `{}`", filename.display());
return Err(format!("failed to read rlib metadata: '{}'",
filename.display()));
}
};
return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) {
None => return Err(format!("failed to read rlib metadata: '{}'",
filename.display())),
Some(blob) => return Ok(blob)
}
}
unsafe {
let mb = filename.with_c_str(|buf| {
llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf)
});
if mb as int == 0 {
return Err(format!("error reading library: '{}'",filename.display()))
}
let of = match ObjectFile::new(mb) {
Some(of) => of,
_ => return Err(format!("provided path not an object file: '{}'", filename.display()))
};
let si = mk_section_iter(of.llof);
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = str::raw::from_buf_len(name_buf as *u8, name_len as uint);
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(os) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as uint;
let mut found = Err(format!("metadata not found: '{}'", filename.display()));
let cvbuf: *u8 = cast::transmute(cbuf);
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
vlen);
let minsz = cmp::min(vlen, csz);
let version_ok = slice::raw::buf_as_slice(cvbuf, minsz,
|buf0| buf0 == encoder::metadata_encoding_version);
if !version_ok { return Err(format!("incompatible metadata version found: '{}'",
filename.display())); }
let cvbuf1 = cvbuf.offset(vlen as int);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| {
match flate::inflate_bytes(bytes) {
Some(inflated) => found = Ok(MetadataVec(inflated)),
None => found = Err(format!("failed to decompress metadata for: '{}'",
filename.display()))
}
});
if found.is_ok() {
return found;
}
}
llvm::LLVMMoveToNextSection(si.llsi);
}
return Err(format!("metadata not found: '{}'", filename.display()));
}
}
pub fn meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__DATA,__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
pub fn read_meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
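// The Mach-O entry above differs from meta_section_name's "__DATA,__note.rustc"
// presumably because the LLVM section iterator reports the section name without
// its "__DATA," segment prefix when reading the object file back.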
// A diagnostic function for dumping crate metadata to an output stream
pub fn list_file_metadata(os: Os, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
match get_metadata_section(os, path) {
Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out),
Err(msg) => {
write!(out, "{}\n", msg)
}
}
} | random_line_split |
|
loader.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Finds crate binaries and loads their metadata
use back::archive::{ArchiveRO, METADATA_FILENAME};
use back::svh::Svh;
use driver::session::Session;
use lib::llvm::{False, llvm, ObjectFile, mk_section_iter};
use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive};
use metadata::decoder;
use metadata::encoder;
use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch};
use syntax::codemap::Span;
use syntax::diagnostic::SpanHandler;
use syntax::crateid::CrateId;
use syntax::attr::AttrMetaMethods;
use util::fs;
use std::c_str::ToCStr;
use std::cast;
use std::cmp;
use std::io;
use std::ptr;
use std::slice;
use std::str;
use collections::{HashMap, HashSet};
use flate;
use time;
pub static MACOS_DLL_PREFIX: &'static str = "lib";
pub static MACOS_DLL_SUFFIX: &'static str = ".dylib";
pub static WIN32_DLL_PREFIX: &'static str = "";
pub static WIN32_DLL_SUFFIX: &'static str = ".dll";
pub static LINUX_DLL_PREFIX: &'static str = "lib";
pub static LINUX_DLL_SUFFIX: &'static str = ".so";
pub static FREEBSD_DLL_PREFIX: &'static str = "lib";
pub static FREEBSD_DLL_SUFFIX: &'static str = ".so";
pub static ANDROID_DLL_PREFIX: &'static str = "lib";
pub static ANDROID_DLL_SUFFIX: &'static str = ".so";
pub enum Os {
OsMacos,
OsWin32,
OsLinux,
OsAndroid,
OsFreebsd
}
pub struct CrateMismatch {
path: Path,
got: ~str,
}
pub struct Context<'a> {
pub sess: &'a Session,
pub span: Span,
pub ident: &'a str,
pub crate_id: &'a CrateId,
pub id_hash: &'a str,
pub hash: Option<&'a Svh>,
pub triple: &'a str,
pub os: Os,
pub filesearch: FileSearch<'a>,
pub root: &'a Option<CratePaths>,
pub rejected_via_hash: Vec<CrateMismatch>,
pub rejected_via_triple: Vec<CrateMismatch>,
}
pub struct Library {
pub dylib: Option<Path>,
pub rlib: Option<Path>,
pub metadata: MetadataBlob,
}
pub struct ArchiveMetadata {
archive: ArchiveRO,
// See comments in ArchiveMetadata::new for why this is static
data: &'static [u8],
}
pub struct CratePaths {
pub ident: ~str,
pub dylib: Option<Path>,
pub rlib: Option<Path>
}
impl CratePaths {
fn paths(&self) -> Vec<Path> {
match (&self.dylib, &self.rlib) {
(&None, &None) => vec!(),
(&Some(ref p), &None) |
(&None, &Some(ref p)) => vec!(p.clone()),
(&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()),
}
}
}
impl<'a> Context<'a> {
pub fn maybe_load_library_crate(&mut self) -> Option<Library> {
self.find_library_crate()
}
pub fn load_library_crate(&mut self) -> Library {
match self.find_library_crate() {
Some(t) => t,
None => {
self.report_load_errs();
unreachable!()
}
}
}
pub fn report_load_errs(&mut self) {
let message = if self.rejected_via_hash.len() > 0 {
format!("found possibly newer version of crate `{}`",
self.ident)
} else if self.rejected_via_triple.len() > 0 {
format!("found incorrect triple for crate `{}`", self.ident)
} else {
format!("can't find crate for `{}`", self.ident)
};
let message = match self.root {
&None => message,
&Some(ref r) => format!("{} which `{}` depends on",
message, r.ident)
};
self.sess.span_err(self.span, message);
let mismatches = self.rejected_via_triple.iter();
if self.rejected_via_triple.len() > 0 {
self.sess.span_note(self.span, format!("expected triple of {}", self.triple));
for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}, triple {}: {}",
self.ident, i+1, got, path.display()));
}
}
if self.rejected_via_hash.len() > 0 {
self.sess.span_note(self.span, "perhaps this crate needs \
to be recompiled?");
let mismatches = self.rejected_via_hash.iter();
for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
self.ident, i+1, path.display()));
}
match self.root {
&None => {}
&Some(ref r) => {
for (i, path) in r.paths().iter().enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
r.ident, i+1, path.display()));
}
}
}
}
self.sess.abort_if_errors();
}
fn find_library_crate(&mut self) -> Option<Library> {
let (dyprefix, dysuffix) = self.dylibname();
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}-", dyprefix, self.crate_id.name);
let rlib_prefix = format!("lib{}-", self.crate_id.name);
let mut candidates = HashMap::new();
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
// exact crate_id and possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
self.filesearch.search(|path| {
let file = match path.filename_str() {
None => return FileDoesntMatch,
Some(file) => file,
};
if file.starts_with(rlib_prefix) && file.ends_with(".rlib") {
info!("rlib candidate: {}", path.display());
match self.try_match(file, rlib_prefix, ".rlib") {
Some(hash) => {
info!("rlib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (ref mut rlibs, _) = *slot;
rlibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("rlib rejected");
FileDoesntMatch
}
}
} else if file.starts_with(dylib_prefix) && file.ends_with(dysuffix){
info!("dylib candidate: {}", path.display());
match self.try_match(file, dylib_prefix, dysuffix) {
Some(hash) => {
info!("dylib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
// keyed off the filename hash listed. For each filename, we also have a
// list of rlibs/dylibs that apply. Here, we map each of these lists
// (per hash), to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name));
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => |
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<~str>{
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice() != vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_owned())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_owned())
} else {
None
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found", flavor, self.crate_id.name));
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref().display()));
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()));
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
            Some(hash) => hash,
            None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice() != self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch{ path: libpath.clone(),
got: triple.to_owned() });
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash != hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch{ path: libpath.clone(),
got: myhash.as_str().to_owned() });
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()));
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
None => {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
return None;
}
};
// This data is actually a pointer inside of the archive itself, but
// we essentially want to cache it because the lookup inside the
// archive is a fairly expensive operation (and it's queried for
// *very* frequently). For this reason, we transmute it to the
// static lifetime to put into the struct. Note that the buffer is
// never actually handed out with a static lifetime, but rather the
// buffer is loaned with the lifetime of this containing object.
// Hence, we're guaranteed that the buffer will never be used after
// this object is dead, so this is a safe operation to transmute and
// store the data as a static buffer.
unsafe { cast::transmute(data) }
};
Some(ArchiveMetadata {
archive: ar,
data: data,
})
}
pub fn as_slice<'a>(&'a self) -> &'a [u8] { self.data }
}
// Just a small wrapper to time how long reading metadata takes.
fn get_metadata_section(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
let start = time::precise_time_ns();
let ret = get_metadata_section_imp(os, filename);
info!("reading {} => {}ms", filename.filename_display(),
(time::precise_time_ns() - start) / 1000000);
return ret;
}
fn get_metadata_section_imp(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
if !filename.exists() {
return Err(format!("no such file: '{}'", filename.display()));
}
if filename.filename_str().unwrap().ends_with(".rlib") {
// Use ArchiveRO for speed here, it's backed by LLVM and uses mmap
// internally to read the file. We also avoid even using a memcpy by
        // just keeping the archive around while the metadata is in use.
let archive = match ArchiveRO::open(filename) {
Some(ar) => ar,
None => {
debug!("llvm didn't like `{}`", filename.display());
return Err(format!("failed to read rlib metadata: '{}'",
filename.display()));
}
};
return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) {
None => return Err(format!("failed to read rlib metadata: '{}'",
filename.display())),
Some(blob) => return Ok(blob)
}
}
unsafe {
let mb = filename.with_c_str(|buf| {
llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf)
});
if mb as int == 0 {
return Err(format!("error reading library: '{}'",filename.display()))
}
let of = match ObjectFile::new(mb) {
Some(of) => of,
_ => return Err(format!("provided path not an object file: '{}'", filename.display()))
};
let si = mk_section_iter(of.llof);
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = str::raw::from_buf_len(name_buf as *u8, name_len as uint);
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(os) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as uint;
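                // Section layout implied by the checks below: a
                // metadata-encoding version stamp, followed by compressed
                // metadata that is decompressed via `flate::inflate_bytes`.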
let mut found = Err(format!("metadata not found: '{}'", filename.display()));
let cvbuf: *u8 = cast::transmute(cbuf);
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
vlen);
let minsz = cmp::min(vlen, csz);
let version_ok = slice::raw::buf_as_slice(cvbuf, minsz,
|buf0| buf0 == encoder::metadata_encoding_version);
if !version_ok { return Err(format!("incompatible metadata version found: '{}'",
filename.display())); }
let cvbuf1 = cvbuf.offset(vlen as int);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| {
match flate::inflate_bytes(bytes) {
Some(inflated) => found = Ok(MetadataVec(inflated)),
None => found = Err(format!("failed to decompress metadata for: '{}'",
filename.display()))
}
});
if found.is_ok() {
return found;
}
}
llvm::LLVMMoveToNextSection(si.llsi);
}
return Err(format!("metadata not found: '{}'", filename.display()));
}
}
pub fn meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__DATA,__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
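// Note: on Mac OS X the name above is a "segment,section" specifier as used
// when emitting, while LLVM's section iterator reports only the bare section
// name, which is why `read_meta_section_name` below matches on "__note.rustc".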
pub fn read_meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
// A diagnostic function for dumping crate metadata to an output stream
pub fn list_file_metadata(os: Os, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
match get_metadata_section(os, path) {
Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out),
Err(msg) => {
write!(out, "{}\n", msg)
}
}
}
| {} | conditional_block |
loader.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Finds crate binaries and loads their metadata
use back::archive::{ArchiveRO, METADATA_FILENAME};
use back::svh::Svh;
use driver::session::Session;
use lib::llvm::{False, llvm, ObjectFile, mk_section_iter};
use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive};
use metadata::decoder;
use metadata::encoder;
use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch};
use syntax::codemap::Span;
use syntax::diagnostic::SpanHandler;
use syntax::crateid::CrateId;
use syntax::attr::AttrMetaMethods;
use util::fs;
use std::c_str::ToCStr;
use std::cast;
use std::cmp;
use std::io;
use std::ptr;
use std::slice;
use std::str;
use collections::{HashMap, HashSet};
use flate;
use time;
pub static MACOS_DLL_PREFIX: &'static str = "lib";
pub static MACOS_DLL_SUFFIX: &'static str = ".dylib";
pub static WIN32_DLL_PREFIX: &'static str = "";
pub static WIN32_DLL_SUFFIX: &'static str = ".dll";
pub static LINUX_DLL_PREFIX: &'static str = "lib";
pub static LINUX_DLL_SUFFIX: &'static str = ".so";
pub static FREEBSD_DLL_PREFIX: &'static str = "lib";
pub static FREEBSD_DLL_SUFFIX: &'static str = ".so";
pub static ANDROID_DLL_PREFIX: &'static str = "lib";
pub static ANDROID_DLL_SUFFIX: &'static str = ".so";
pub enum Os {
OsMacos,
OsWin32,
OsLinux,
OsAndroid,
OsFreebsd
}
pub struct CrateMismatch {
path: Path,
got: ~str,
}
pub struct Context<'a> {
pub sess: &'a Session,
pub span: Span,
pub ident: &'a str,
pub crate_id: &'a CrateId,
pub id_hash: &'a str,
pub hash: Option<&'a Svh>,
pub triple: &'a str,
pub os: Os,
pub filesearch: FileSearch<'a>,
pub root: &'a Option<CratePaths>,
pub rejected_via_hash: Vec<CrateMismatch>,
pub rejected_via_triple: Vec<CrateMismatch>,
}
pub struct Library {
pub dylib: Option<Path>,
pub rlib: Option<Path>,
pub metadata: MetadataBlob,
}
pub struct ArchiveMetadata {
archive: ArchiveRO,
// See comments in ArchiveMetadata::new for why this is static
data: &'static [u8],
}
pub struct | {
pub ident: ~str,
pub dylib: Option<Path>,
pub rlib: Option<Path>
}
impl CratePaths {
fn paths(&self) -> Vec<Path> {
match (&self.dylib, &self.rlib) {
(&None, &None) => vec!(),
(&Some(ref p), &None) |
(&None, &Some(ref p)) => vec!(p.clone()),
(&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()),
}
}
}
impl<'a> Context<'a> {
pub fn maybe_load_library_crate(&mut self) -> Option<Library> {
self.find_library_crate()
}
pub fn load_library_crate(&mut self) -> Library {
match self.find_library_crate() {
Some(t) => t,
None => {
self.report_load_errs();
unreachable!()
}
}
}
pub fn report_load_errs(&mut self) {
let message = if self.rejected_via_hash.len() > 0 {
format!("found possibly newer version of crate `{}`",
self.ident)
} else if self.rejected_via_triple.len() > 0 {
format!("found incorrect triple for crate `{}`", self.ident)
} else {
format!("can't find crate for `{}`", self.ident)
};
let message = match self.root {
&None => message,
&Some(ref r) => format!("{} which `{}` depends on",
message, r.ident)
};
self.sess.span_err(self.span, message);
let mismatches = self.rejected_via_triple.iter();
if self.rejected_via_triple.len() > 0 {
self.sess.span_note(self.span, format!("expected triple of {}", self.triple));
for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}, triple {}: {}",
self.ident, i+1, got, path.display()));
}
}
if self.rejected_via_hash.len() > 0 {
self.sess.span_note(self.span, "perhaps this crate needs \
to be recompiled?");
let mismatches = self.rejected_via_hash.iter();
for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
self.ident, i+1, path.display()));
}
match self.root {
&None => {}
&Some(ref r) => {
for (i, path) in r.paths().iter().enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
r.ident, i+1, path.display()));
}
}
}
}
self.sess.abort_if_errors();
}
fn find_library_crate(&mut self) -> Option<Library> {
let (dyprefix, dysuffix) = self.dylibname();
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}-", dyprefix, self.crate_id.name);
let rlib_prefix = format!("lib{}-", self.crate_id.name);
let mut candidates = HashMap::new();
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
        // exact crate_id and possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
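        // Illustrative example (crate name assumed): for a crate named `foo`
        // on Linux this looks for filenames of the form
        // `libfoo-<hash>-<version>.rlib` and `libfoo-<hash>-<version>.so`.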
self.filesearch.search(|path| {
let file = match path.filename_str() {
None => return FileDoesntMatch,
Some(file) => file,
};
if file.starts_with(rlib_prefix) && file.ends_with(".rlib") {
info!("rlib candidate: {}", path.display());
match self.try_match(file, rlib_prefix, ".rlib") {
Some(hash) => {
info!("rlib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (ref mut rlibs, _) = *slot;
rlibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("rlib rejected");
FileDoesntMatch
}
}
            } else if file.starts_with(dylib_prefix) && file.ends_with(dysuffix) {
info!("dylib candidate: {}", path.display());
match self.try_match(file, dylib_prefix, dysuffix) {
Some(hash) => {
info!("dylib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
        // keyed off the hash listed in the filename. For each hash, we also
        // have a list of rlibs/dylibs that apply. Here, we map each of these
        // lists (per hash) to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name));
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
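    // Illustrative example (filename assumed for exposition): for the file
    // `libfoo-a1b2c3d4-0.10.rlib` with prefix `libfoo-` and suffix `.rlib`,
    // `middle` is `a1b2c3d4-0.10`, which splits into the hash `a1b2c3d4` and
    // the version `0.10`.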
    fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<~str> {
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice() != vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_owned())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_owned())
} else {
None
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found", flavor, self.crate_id.name));
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref().display()));
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()));
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
            Some(hash) => hash,
            None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice() != self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch{ path: libpath.clone(),
got: triple.to_owned() });
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash != hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch{ path: libpath.clone(),
got: myhash.as_str().to_owned() });
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()));
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
None => {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
return None;
}
};
// This data is actually a pointer inside of the archive itself, but
// we essentially want to cache it because the lookup inside the
// archive is a fairly expensive operation (and it's queried for
// *very* frequently). For this reason, we transmute it to the
// static lifetime to put into the struct. Note that the buffer is
// never actually handed out with a static lifetime, but rather the
// buffer is loaned with the lifetime of this containing object.
// Hence, we're guaranteed that the buffer will never be used after
// this object is dead, so this is a safe operation to transmute and
// store the data as a static buffer.
unsafe { cast::transmute(data) }
};
Some(ArchiveMetadata {
archive: ar,
data: data,
})
}
pub fn as_slice<'a>(&'a self) -> &'a [u8] { self.data }
}
// Just a small wrapper to time how long reading metadata takes.
fn get_metadata_section(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
let start = time::precise_time_ns();
let ret = get_metadata_section_imp(os, filename);
info!("reading {} => {}ms", filename.filename_display(),
(time::precise_time_ns() - start) / 1000000);
return ret;
}
fn get_metadata_section_imp(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
if !filename.exists() {
return Err(format!("no such file: '{}'", filename.display()));
}
if filename.filename_str().unwrap().ends_with(".rlib") {
// Use ArchiveRO for speed here, it's backed by LLVM and uses mmap
// internally to read the file. We also avoid even using a memcpy by
        // just keeping the archive around while the metadata is in use.
let archive = match ArchiveRO::open(filename) {
Some(ar) => ar,
None => {
debug!("llvm didn't like `{}`", filename.display());
return Err(format!("failed to read rlib metadata: '{}'",
filename.display()));
}
};
return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) {
None => return Err(format!("failed to read rlib metadata: '{}'",
filename.display())),
Some(blob) => return Ok(blob)
}
}
unsafe {
let mb = filename.with_c_str(|buf| {
llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf)
});
if mb as int == 0 {
return Err(format!("error reading library: '{}'",filename.display()))
}
let of = match ObjectFile::new(mb) {
Some(of) => of,
_ => return Err(format!("provided path not an object file: '{}'", filename.display()))
};
let si = mk_section_iter(of.llof);
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = str::raw::from_buf_len(name_buf as *u8, name_len as uint);
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(os) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as uint;
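                // Section layout implied by the checks below: a
                // metadata-encoding version stamp, followed by compressed
                // metadata that is decompressed via `flate::inflate_bytes`.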
let mut found = Err(format!("metadata not found: '{}'", filename.display()));
let cvbuf: *u8 = cast::transmute(cbuf);
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
vlen);
let minsz = cmp::min(vlen, csz);
let version_ok = slice::raw::buf_as_slice(cvbuf, minsz,
|buf0| buf0 == encoder::metadata_encoding_version);
if !version_ok { return Err(format!("incompatible metadata version found: '{}'",
filename.display())); }
let cvbuf1 = cvbuf.offset(vlen as int);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| {
match flate::inflate_bytes(bytes) {
Some(inflated) => found = Ok(MetadataVec(inflated)),
None => found = Err(format!("failed to decompress metadata for: '{}'",
filename.display()))
}
});
if found.is_ok() {
return found;
}
}
llvm::LLVMMoveToNextSection(si.llsi);
}
return Err(format!("metadata not found: '{}'", filename.display()));
}
}
pub fn meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__DATA,__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
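// Note: on Mac OS X the name above is a "segment,section" specifier as used
// when emitting, while LLVM's section iterator reports only the bare section
// name, which is why `read_meta_section_name` below matches on "__note.rustc".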
pub fn read_meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
// A diagnostic function for dumping crate metadata to an output stream
pub fn list_file_metadata(os: Os, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
match get_metadata_section(os, path) {
Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out),
Err(msg) => {
write!(out, "{}\n", msg)
}
}
}
| CratePaths | identifier_name |
loader.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Finds crate binaries and loads their metadata
use back::archive::{ArchiveRO, METADATA_FILENAME};
use back::svh::Svh;
use driver::session::Session;
use lib::llvm::{False, llvm, ObjectFile, mk_section_iter};
use metadata::cstore::{MetadataBlob, MetadataVec, MetadataArchive};
use metadata::decoder;
use metadata::encoder;
use metadata::filesearch::{FileSearch, FileMatches, FileDoesntMatch};
use syntax::codemap::Span;
use syntax::diagnostic::SpanHandler;
use syntax::crateid::CrateId;
use syntax::attr::AttrMetaMethods;
use util::fs;
use std::c_str::ToCStr;
use std::cast;
use std::cmp;
use std::io;
use std::ptr;
use std::slice;
use std::str;
use collections::{HashMap, HashSet};
use flate;
use time;
pub static MACOS_DLL_PREFIX: &'static str = "lib";
pub static MACOS_DLL_SUFFIX: &'static str = ".dylib";
pub static WIN32_DLL_PREFIX: &'static str = "";
pub static WIN32_DLL_SUFFIX: &'static str = ".dll";
pub static LINUX_DLL_PREFIX: &'static str = "lib";
pub static LINUX_DLL_SUFFIX: &'static str = ".so";
pub static FREEBSD_DLL_PREFIX: &'static str = "lib";
pub static FREEBSD_DLL_SUFFIX: &'static str = ".so";
pub static ANDROID_DLL_PREFIX: &'static str = "lib";
pub static ANDROID_DLL_SUFFIX: &'static str = ".so";
pub enum Os {
OsMacos,
OsWin32,
OsLinux,
OsAndroid,
OsFreebsd
}
pub struct CrateMismatch {
path: Path,
got: ~str,
}
pub struct Context<'a> {
pub sess: &'a Session,
pub span: Span,
pub ident: &'a str,
pub crate_id: &'a CrateId,
pub id_hash: &'a str,
pub hash: Option<&'a Svh>,
pub triple: &'a str,
pub os: Os,
pub filesearch: FileSearch<'a>,
pub root: &'a Option<CratePaths>,
pub rejected_via_hash: Vec<CrateMismatch>,
pub rejected_via_triple: Vec<CrateMismatch>,
}
pub struct Library {
pub dylib: Option<Path>,
pub rlib: Option<Path>,
pub metadata: MetadataBlob,
}
pub struct ArchiveMetadata {
archive: ArchiveRO,
// See comments in ArchiveMetadata::new for why this is static
data: &'static [u8],
}
pub struct CratePaths {
pub ident: ~str,
pub dylib: Option<Path>,
pub rlib: Option<Path>
}
impl CratePaths {
fn paths(&self) -> Vec<Path> |
}
impl<'a> Context<'a> {
pub fn maybe_load_library_crate(&mut self) -> Option<Library> {
self.find_library_crate()
}
pub fn load_library_crate(&mut self) -> Library {
match self.find_library_crate() {
Some(t) => t,
None => {
self.report_load_errs();
unreachable!()
}
}
}
pub fn report_load_errs(&mut self) {
let message = if self.rejected_via_hash.len() > 0 {
format!("found possibly newer version of crate `{}`",
self.ident)
} else if self.rejected_via_triple.len() > 0 {
format!("found incorrect triple for crate `{}`", self.ident)
} else {
format!("can't find crate for `{}`", self.ident)
};
let message = match self.root {
&None => message,
&Some(ref r) => format!("{} which `{}` depends on",
message, r.ident)
};
self.sess.span_err(self.span, message);
let mismatches = self.rejected_via_triple.iter();
if self.rejected_via_triple.len() > 0 {
self.sess.span_note(self.span, format!("expected triple of {}", self.triple));
for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}, triple {}: {}",
self.ident, i+1, got, path.display()));
}
}
if self.rejected_via_hash.len() > 0 {
self.sess.span_note(self.span, "perhaps this crate needs \
to be recompiled?");
let mismatches = self.rejected_via_hash.iter();
for (i, &CrateMismatch{ ref path, .. }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
self.ident, i+1, path.display()));
}
match self.root {
&None => {}
&Some(ref r) => {
for (i, path) in r.paths().iter().enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
r.ident, i+1, path.display()));
}
}
}
}
self.sess.abort_if_errors();
}
fn find_library_crate(&mut self) -> Option<Library> {
let (dyprefix, dysuffix) = self.dylibname();
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}-", dyprefix, self.crate_id.name);
let rlib_prefix = format!("lib{}-", self.crate_id.name);
let mut candidates = HashMap::new();
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
        // exact crate_id and possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
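        // Illustrative example (crate name assumed): for a crate named `foo`
        // on Linux this looks for filenames of the form
        // `libfoo-<hash>-<version>.rlib` and `libfoo-<hash>-<version>.so`.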
self.filesearch.search(|path| {
let file = match path.filename_str() {
None => return FileDoesntMatch,
Some(file) => file,
};
if file.starts_with(rlib_prefix) && file.ends_with(".rlib") {
info!("rlib candidate: {}", path.display());
match self.try_match(file, rlib_prefix, ".rlib") {
Some(hash) => {
info!("rlib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (ref mut rlibs, _) = *slot;
rlibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("rlib rejected");
FileDoesntMatch
}
}
            } else if file.starts_with(dylib_prefix) && file.ends_with(dysuffix) {
info!("dylib candidate: {}", path.display());
match self.try_match(file, dylib_prefix, dysuffix) {
Some(hash) => {
info!("dylib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
        // keyed off the hash listed in the filename. For each hash, we also
        // have a list of rlibs/dylibs that apply. Here, we map each of these
        // lists (per hash) to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name));
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}", p.display()));
}
None => {}
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
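    // Illustrative example (filename assumed for exposition): for the file
    // `libfoo-a1b2c3d4-0.10.rlib` with prefix `libfoo-` and suffix `.rlib`,
    // `middle` is `a1b2c3d4-0.10`, which splits into the hash `a1b2c3d4` and
    // the version `0.10`.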
    fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<~str> {
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice() != vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_owned())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_owned())
} else {
None
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found", flavor, self.crate_id.name));
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref().display()));
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()));
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
            Some(hash) => hash,
            None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice() != self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch{ path: libpath.clone(),
got: triple.to_owned() });
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash != hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch{ path: libpath.clone(),
got: myhash.as_str().to_owned() });
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()));
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
None => {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
return None;
}
};
// This data is actually a pointer inside of the archive itself, but
// we essentially want to cache it because the lookup inside the
// archive is a fairly expensive operation (and it's queried for
// *very* frequently). For this reason, we transmute it to the
// static lifetime to put into the struct. Note that the buffer is
// never actually handed out with a static lifetime, but rather the
// buffer is loaned with the lifetime of this containing object.
// Hence, we're guaranteed that the buffer will never be used after
// this object is dead, so this is a safe operation to transmute and
// store the data as a static buffer.
unsafe { cast::transmute(data) }
};
Some(ArchiveMetadata {
archive: ar,
data: data,
})
}
pub fn as_slice<'a>(&'a self) -> &'a [u8] { self.data }
}
// Just a small wrapper to time how long reading metadata takes.
fn get_metadata_section(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
let start = time::precise_time_ns();
let ret = get_metadata_section_imp(os, filename);
info!("reading {} => {}ms", filename.filename_display(),
(time::precise_time_ns() - start) / 1000000);
return ret;
}
fn get_metadata_section_imp(os: Os, filename: &Path) -> Result<MetadataBlob, ~str> {
if !filename.exists() {
return Err(format!("no such file: '{}'", filename.display()));
}
if filename.filename_str().unwrap().ends_with(".rlib") {
// Use ArchiveRO for speed here, it's backed by LLVM and uses mmap
// internally to read the file. We also avoid even using a memcpy by
        // just keeping the archive around while the metadata is in use.
let archive = match ArchiveRO::open(filename) {
Some(ar) => ar,
None => {
debug!("llvm didn't like `{}`", filename.display());
return Err(format!("failed to read rlib metadata: '{}'",
filename.display()));
}
};
return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) {
None => return Err(format!("failed to read rlib metadata: '{}'",
filename.display())),
Some(blob) => return Ok(blob)
}
}
unsafe {
let mb = filename.with_c_str(|buf| {
llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf)
});
if mb as int == 0 {
return Err(format!("error reading library: '{}'",filename.display()))
}
let of = match ObjectFile::new(mb) {
Some(of) => of,
_ => return Err(format!("provided path not an object file: '{}'", filename.display()))
};
let si = mk_section_iter(of.llof);
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = str::raw::from_buf_len(name_buf as *u8, name_len as uint);
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(os) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as uint;
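                // Section layout implied by the checks below: a
                // metadata-encoding version stamp, followed by compressed
                // metadata that is decompressed via `flate::inflate_bytes`.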
let mut found = Err(format!("metadata not found: '{}'", filename.display()));
let cvbuf: *u8 = cast::transmute(cbuf);
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
vlen);
let minsz = cmp::min(vlen, csz);
let version_ok = slice::raw::buf_as_slice(cvbuf, minsz,
|buf0| buf0 == encoder::metadata_encoding_version);
if !version_ok { return Err(format!("incompatible metadata version found: '{}'",
filename.display())); }
let cvbuf1 = cvbuf.offset(vlen as int);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| {
match flate::inflate_bytes(bytes) {
Some(inflated) => found = Ok(MetadataVec(inflated)),
None => found = Err(format!("failed to decompress metadata for: '{}'",
filename.display()))
}
});
if found.is_ok() {
return found;
}
}
llvm::LLVMMoveToNextSection(si.llsi);
}
return Err(format!("metadata not found: '{}'", filename.display()));
}
}
pub fn meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__DATA,__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
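// Note: on Mac OS X the name above is a "segment,section" specifier as used
// when emitting, while LLVM's section iterator reports only the bare section
// name, which is why `read_meta_section_name` below matches on "__note.rustc".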
pub fn read_meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
// A diagnostic function for dumping crate metadata to an output stream
pub fn list_file_metadata(os: Os, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
match get_metadata_section(os, path) {
Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out),
Err(msg) => {
write!(out, "{}\n", msg)
}
}
}
| {
match (&self.dylib, &self.rlib) {
(&None, &None) => vec!(),
(&Some(ref p), &None) |
(&None, &Some(ref p)) => vec!(p.clone()),
(&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()),
}
} | identifier_body |
test_deprecations.py | import logging
import pytest
from traitlets.config import Config
from dockerspawner import DockerSpawner
def | (caplog):
cfg = Config()
cfg.DockerSpawner.image_whitelist = {"1.0": "jupyterhub/singleuser:1.0"}
log = logging.getLogger("testlog")
spawner = DockerSpawner(config=cfg, log=log)
assert caplog.record_tuples == [
(
log.name,
logging.WARNING,
'DockerSpawner.image_whitelist is deprecated in DockerSpawner 12.0, use '
'DockerSpawner.allowed_images instead',
)
]
assert spawner.allowed_images == {"1.0": "jupyterhub/singleuser:1.0"}
async def test_deprecated_methods():
cfg = Config()
cfg.DockerSpawner.image_whitelist = {"1.0": "jupyterhub/singleuser:1.0"}
spawner = DockerSpawner(config=cfg)
assert await spawner.check_allowed("1.0")
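    # pytest.deprecated_call() asserts that the call below emits a DeprecationWarning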
with pytest.deprecated_call():
assert await spawner.check_image_whitelist("1.0")
| test_deprecated_config | identifier_name |