file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
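Each row below is one flattened fill-in-the-middle (FIM) sample: the `|` separators split a row into the five columns named above, so the `|` that interrupts a code listing marks where the masked `middle` span was cut out of the file, and each row ends with that `middle` text followed by its `fim_type` (one of the 4 classes seen in this dump: `identifier_name`, `identifier_body`, `conditional_block`, `random_line_split`). A minimal sketch of how such a row could be reassembled or turned into a FIM prompt, assuming the five-column layout above; the dict literal and the sentinel strings are illustrative assumptions, not values defined by this dataset:

```python
def reassemble(row):
    """Restore the original file text: the masked middle goes back between prefix and suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    """Render a sample in the conventional prefix/suffix/middle prompt order.

    The sentinel token strings here are assumptions for illustration; real FIM
    training uses whatever sentinels the target tokenizer defines."""
    return pre + row["prefix"] + suf + row["suffix"] + mid

# Toy row mirroring the first sample below (abridged to one line per column).
row = {
    "file_name": "test-amp-vk.js",
    "prefix": "function ",                # text before the masked span
    "suffix": "(dataParams, layout) {",   # text after the masked span
    "middle": "createAmpVkElement",       # the span a model must fill in
    "fim_type": "identifier_name",        # one of the 4 classes in this dump
}
assert reassemble(row) == "function createAmpVkElement(dataParams, layout) {"
```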
test-amp-vk.js | /**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const POST_PARAMS = {
'embedtype': 'post',
'hash': 'Yc8_Z9pnpg8aKMZbVcD-jK45eAk',
'owner-id': '1',
'post-id': '45616',
};
const POLL_PARAMS = {
'embedtype': 'poll',
'api-id': '6183531',
'poll-id': '274086843_1a2a465f60fff4699f',
};
import '../amp-vk';
import {Layout} from '../../../../src/layout';
import {Resource} from '../../../../src/service/resource';
describes.realWin('amp-vk', {
amp: {
extensions: ['amp-vk'],
},
}, env => {
let win, doc;
beforeEach(() => {
win = env.win;
doc = win.document;
});
function | (dataParams, layout) {
const element = doc.createElement('amp-vk');
for (const param in dataParams) {
element.setAttribute(`data-${param}`, dataParams[param]);
}
element.setAttribute('width', 500);
element.setAttribute('height', 300);
if (layout) {
element.setAttribute('layout', layout);
}
doc.body.appendChild(element);
return element.build().then(() => {
const resource = Resource.forElement(element);
resource.measure();
return element.layoutCallback();
}).then(() => element);
}
it('requires data-embedtype', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['embedtype'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-embedtype attribute is required for/);
});
it('removes iframe after unlayoutCallback', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
const obj = vkPost.implementation_;
obj.unlayoutCallback();
expect(vkPost.querySelector('iframe')).to.be.null;
expect(obj.iframe_).to.be.null;
expect(obj.unlayoutOnPause()).to.be.true;
});
});
// Post tests
it('post::requires data-hash', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['hash'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-hash attribute is required for/);
});
it('post::requires data-owner-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['owner-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-owner-id attribute is required for/);
});
it('post::requires data-post-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['post-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-post-id attribute is required for/);
});
it('post::renders iframe in amp-vk', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('post::renders responsively', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('post::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const impl = vkPost.implementation_;
const iframe = vkPost.querySelector('iframe');
const referrer = encodeURIComponent(vkPost.ownerDocument.referrer);
const url = encodeURIComponent(
vkPost.ownerDocument.location.href.replace(/#.*$/, '')
);
impl.onLayoutMeasure();
const startWidth = impl.getLayoutWidth();
const correctIFrameSrc = `https://vk.com/widget_post.php?app=0&width=100%25\
&_ver=1&owner_id=1&post_id=45616&hash=Yc8_Z9pnpg8aKMZbVcD-jK45eAk&=1\
&startWidth=${startWidth}&url=${url}&referrer=${referrer}&title=AMP%20Post`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
// Poll tests
it('poll::requires data-api-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['api-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-api-id attribute is required for/);
});
it('poll::requires data-poll-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['poll-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-poll-id attribute is required for/);
});
it('poll::renders iframe in amp-vk', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('poll::renders responsively', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('poll::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
const referrer = encodeURIComponent(vkPoll.ownerDocument.referrer);
const url = encodeURIComponent(
vkPoll.ownerDocument.location.href.replace(/#.*$/, '')
);
const correctIFrameSrc = `https://vk.com/al_widget_poll.php?\
app=6183531&width=100%25&_ver=1&poll_id=274086843_1a2a465f60fff4699f&=1\
&url=${url}&title=AMP%20Poll&description=&referrer=${referrer}`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
it('both::resizes amp-vk element in response to postmessages', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const impl = vkPoll.implementation_;
const iframe = vkPoll.querySelector('iframe');
const changeHeight = sandbox.spy(impl, 'changeHeight');
const fakeHeight = 555;
expect(iframe).to.not.be.null;
generatePostMessage(vkPoll, iframe, fakeHeight);
expect(changeHeight).to.be.calledOnce;
expect(changeHeight.firstCall.args[0]).to.equal(fakeHeight);
});
});
function generatePostMessage(ins, iframe, height) {
ins.implementation_.handleVkIframeMessage_({
origin: 'https://vk.com',
source: iframe.contentWindow,
data: JSON.stringify([
'resize',
[height],
]),
});
}
});
| createAmpVkElement | identifier_name |
test-amp-vk.js | /**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const POST_PARAMS = {
'embedtype': 'post',
'hash': 'Yc8_Z9pnpg8aKMZbVcD-jK45eAk',
'owner-id': '1',
'post-id': '45616',
};
const POLL_PARAMS = {
'embedtype': 'poll',
'api-id': '6183531',
'poll-id': '274086843_1a2a465f60fff4699f',
};
import '../amp-vk';
import {Layout} from '../../../../src/layout';
import {Resource} from '../../../../src/service/resource';
describes.realWin('amp-vk', {
amp: {
extensions: ['amp-vk'],
},
}, env => {
let win, doc;
beforeEach(() => {
win = env.win;
doc = win.document;
});
function createAmpVkElement(dataParams, layout) {
const element = doc.createElement('amp-vk');
for (const param in dataParams) {
element.setAttribute(`data-${param}`, dataParams[param]);
}
element.setAttribute('width', 500);
element.setAttribute('height', 300);
if (layout) |
doc.body.appendChild(element);
return element.build().then(() => {
const resource = Resource.forElement(element);
resource.measure();
return element.layoutCallback();
}).then(() => element);
}
it('requires data-embedtype', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['embedtype'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-embedtype attribute is required for/);
});
it('removes iframe after unlayoutCallback', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
const obj = vkPost.implementation_;
obj.unlayoutCallback();
expect(vkPost.querySelector('iframe')).to.be.null;
expect(obj.iframe_).to.be.null;
expect(obj.unlayoutOnPause()).to.be.true;
});
});
// Post tests
it('post::requires data-hash', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['hash'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-hash attribute is required for/);
});
it('post::requires data-owner-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['owner-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-owner-id attribute is required for/);
});
it('post::requires data-post-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['post-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-post-id attribute is required for/);
});
it('post::renders iframe in amp-vk', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('post::renders responsively', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('post::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const impl = vkPost.implementation_;
const iframe = vkPost.querySelector('iframe');
const referrer = encodeURIComponent(vkPost.ownerDocument.referrer);
const url = encodeURIComponent(
vkPost.ownerDocument.location.href.replace(/#.*$/, '')
);
impl.onLayoutMeasure();
const startWidth = impl.getLayoutWidth();
const correctIFrameSrc = `https://vk.com/widget_post.php?app=0&width=100%25\
&_ver=1&owner_id=1&post_id=45616&hash=Yc8_Z9pnpg8aKMZbVcD-jK45eAk&=1\
&startWidth=${startWidth}&url=${url}&referrer=${referrer}&title=AMP%20Post`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
// Poll tests
it('poll::requires data-api-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['api-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-api-id attribute is required for/);
});
it('poll::requires data-poll-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['poll-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-poll-id attribute is required for/);
});
it('poll::renders iframe in amp-vk', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('poll::renders responsively', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('poll::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
const referrer = encodeURIComponent(vkPoll.ownerDocument.referrer);
const url = encodeURIComponent(
vkPoll.ownerDocument.location.href.replace(/#.*$/, '')
);
const correctIFrameSrc = `https://vk.com/al_widget_poll.php?\
app=6183531&width=100%25&_ver=1&poll_id=274086843_1a2a465f60fff4699f&=1\
&url=${url}&title=AMP%20Poll&description=&referrer=${referrer}`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
it('both::resizes amp-vk element in response to postmessages', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const impl = vkPoll.implementation_;
const iframe = vkPoll.querySelector('iframe');
const changeHeight = sandbox.spy(impl, 'changeHeight');
const fakeHeight = 555;
expect(iframe).to.not.be.null;
generatePostMessage(vkPoll, iframe, fakeHeight);
expect(changeHeight).to.be.calledOnce;
expect(changeHeight.firstCall.args[0]).to.equal(fakeHeight);
});
});
function generatePostMessage(ins, iframe, height) {
ins.implementation_.handleVkIframeMessage_({
origin: 'https://vk.com',
source: iframe.contentWindow,
data: JSON.stringify([
'resize',
[height],
]),
});
}
});
| {
element.setAttribute('layout', layout);
} | conditional_block |
test-amp-vk.js | /**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const POST_PARAMS = {
'embedtype': 'post',
'hash': 'Yc8_Z9pnpg8aKMZbVcD-jK45eAk',
'owner-id': '1',
'post-id': '45616',
};
const POLL_PARAMS = {
'embedtype': 'poll',
'api-id': '6183531',
'poll-id': '274086843_1a2a465f60fff4699f',
};
import '../amp-vk';
import {Layout} from '../../../../src/layout';
import {Resource} from '../../../../src/service/resource';
describes.realWin('amp-vk', {
amp: {
extensions: ['amp-vk'],
},
}, env => {
let win, doc;
beforeEach(() => {
win = env.win;
doc = win.document;
});
function createAmpVkElement(dataParams, layout) |
it('requires data-embedtype', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['embedtype'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-embedtype attribute is required for/);
});
it('removes iframe after unlayoutCallback', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
const obj = vkPost.implementation_;
obj.unlayoutCallback();
expect(vkPost.querySelector('iframe')).to.be.null;
expect(obj.iframe_).to.be.null;
expect(obj.unlayoutOnPause()).to.be.true;
});
});
// Post tests
it('post::requires data-hash', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['hash'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-hash attribute is required for/);
});
it('post::requires data-owner-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['owner-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-owner-id attribute is required for/);
});
it('post::requires data-post-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['post-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-post-id attribute is required for/);
});
it('post::renders iframe in amp-vk', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('post::renders responsively', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('post::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const impl = vkPost.implementation_;
const iframe = vkPost.querySelector('iframe');
const referrer = encodeURIComponent(vkPost.ownerDocument.referrer);
const url = encodeURIComponent(
vkPost.ownerDocument.location.href.replace(/#.*$/, '')
);
impl.onLayoutMeasure();
const startWidth = impl.getLayoutWidth();
const correctIFrameSrc = `https://vk.com/widget_post.php?app=0&width=100%25\
&_ver=1&owner_id=1&post_id=45616&hash=Yc8_Z9pnpg8aKMZbVcD-jK45eAk&=1\
&startWidth=${startWidth}&url=${url}&referrer=${referrer}&title=AMP%20Post`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
// Poll tests
it('poll::requires data-api-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['api-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-api-id attribute is required for/);
});
it('poll::requires data-poll-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['poll-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-poll-id attribute is required for/);
});
it('poll::renders iframe in amp-vk', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('poll::renders responsively', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('poll::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
const referrer = encodeURIComponent(vkPoll.ownerDocument.referrer);
const url = encodeURIComponent(
vkPoll.ownerDocument.location.href.replace(/#.*$/, '')
);
const correctIFrameSrc = `https://vk.com/al_widget_poll.php?\
app=6183531&width=100%25&_ver=1&poll_id=274086843_1a2a465f60fff4699f&=1\
&url=${url}&title=AMP%20Poll&description=&referrer=${referrer}`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
it('both::resizes amp-vk element in response to postmessages', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const impl = vkPoll.implementation_;
const iframe = vkPoll.querySelector('iframe');
const changeHeight = sandbox.spy(impl, 'changeHeight');
const fakeHeight = 555;
expect(iframe).to.not.be.null;
generatePostMessage(vkPoll, iframe, fakeHeight);
expect(changeHeight).to.be.calledOnce;
expect(changeHeight.firstCall.args[0]).to.equal(fakeHeight);
});
});
function generatePostMessage(ins, iframe, height) {
ins.implementation_.handleVkIframeMessage_({
origin: 'https://vk.com',
source: iframe.contentWindow,
data: JSON.stringify([
'resize',
[height],
]),
});
}
});
| {
const element = doc.createElement('amp-vk');
for (const param in dataParams) {
element.setAttribute(`data-${param}`, dataParams[param]);
}
element.setAttribute('width', 500);
element.setAttribute('height', 300);
if (layout) {
element.setAttribute('layout', layout);
}
doc.body.appendChild(element);
return element.build().then(() => {
const resource = Resource.forElement(element);
resource.measure();
return element.layoutCallback();
}).then(() => element);
} | identifier_body |
Account.js | module.exports = function(config, mongoose, nodemailer) {
var crypto = require('crypto');
var Status = new mongoose.Schema({
name: {
first: { type: String },
last: { type: String }
},
status: { type: String }
});
var Contact = new mongoose.Schema({
name: {
first: { type:String },
last: { type: String }
},
accountId: { type: mongoose.Schema.ObjectId },
added: { type: Date }, // When the contact was added
updated: { type: Date } // When the contact last updated
});
var AccountSchema = new mongoose.Schema({
email: { type: String, unique: true },
password: { type: String },
name: {
first: { type: String },
last: { type: String }
},
birthday: {
day: { type: Number, min: 1, max: 31, required: false },
month: { type: Number, min: 1, max: 12, required: false },
year: { type: Number }
},
contacts: [Contact],
photoUrl: { type: String },
biography: { type: String },
status: [Status], // My own status updates only
activity: [Status] // All status updates including friends
});
var Account = mongoose.model('Account', AccountSchema);
var registerCallback = function(err) {
if (err) {
return console.log(err); | return console.log('Account was created');
};
var changePassword = function(accountId, newpassword) {
var shaSum = crypto.createHash('sha256');
shaSum.update(newpassword);
var hashedPassword = shaSum.digest('hex');
Account.update({_id:accountId},
{
$set: { password : hashedPassword }
},
{
upsert:false
},
function changePasswordCallback(err) {
console.log('Change password done for account ' + accountId);
});
};
var forgotPassword = function(email, resetPasswordUrl, callback) {
var user = Account.findOne({email: email},
function findAccount(err, doc){
if (err) {
// Email address is not a valid user
callback(false);
} else {
var smtpTransport = nodemailer.createTransport('SMTP',
config.mail);
resetPasswordUrl += '?account=' + doc._id;
smtpTransport.sendMail({
from: '[email protected]',
to: doc.email,
subject: 'SocialNet Password Request',
text: 'Click here to reset your password: ' +
resetPasswordUrl
}, function forgotPasswordResult(err) {
if (err) {
callback(false);
} else {
callback(true);
}
});
}
});
};
var login = function(email, password, callback) {
var shaSum = crypto.createHash('sha256');
shaSum.update(password);
Account.findOne({
email:email,
password:shaSum.digest('hex')
},function(err,doc){
callback(doc);
});
};
var findById = function(accountId, callback) {
console.log('findById: Finding ' + accountId);
Account.findOne({_id: accountId}, function(err,doc) {
if(err) {
throw new Error('Error occured: ' + err);
}
if (doc) {
callback(doc);
} else {
console.log('Account.findOne: no document found');
}
});
};
var register = function(email, password, firstName, lastName) {
var shaSum = crypto.createHash('sha256');
shaSum.update(password);
console.log('Registering ' + email);
var user = new Account({
email: email,
name: {
first: firstName,
last: lastName
},
password: shaSum.digest('hex')
});
user.save(registerCallback);
console.log('Save command was sent');
};
var findByString = function(searchStr, callback) {
var searchRegex = new RegExp(searchStr, 'i');
Account.find({
$or: [
{ 'name.full': { $regex: searchRegex } },
{ email: { $regex: searchRegex } }
]
}, callback);
};
var addContact = function(account, addcontact) {
var contact = {
name: {
// Instead of { name: account.name } because of some bug
first: addcontact.name.first,
last: addcontact.name.last
},
accountId: addcontact._id,
added: new Date(),
updated: new Date()
};
var arr = account.contacts;
arr.push(contact);
account.update({}, {$set: {contacts: arr}}, function () {
// console.log(account.contacts);
account.save(function (err) {
if (err) {
console.log('Error saving account: ' + err);
}
});
});
};
var removeContact = function(account, contactId) {
if ( null === account.contacts ) {
console.log('No contacts in the account');
return;
}
console.log('Finding and deleting: ' + contactId);
account.contacts.forEach(function(contact) {
console.log('checking against: ' + contact.accountId);
if ( contact.accountId == contactId ) {
console.log('Match found and deleted');
account.contacts.remove(contact);
// // Find and remove item from an array
// var i = account.contacts.indexOf(contact);
// if(i != -1) {
// array.splice(i, 1);
// }
}
});
account.save(function (err, doc, noAffected) {
// console.log('updated rec saved');
// console.log(doc)
// console.log('EODoc');
});
};
var hasContact = function(account, contactId) {
if ( null === account.contacts ) {
return false;
}
// Use some() rather than forEach(): a return inside a forEach callback
// is discarded, so the old loop always fell through to false. Loose
// equality (==) coerces a Mongo ObjectId to a string, matching the
// comparison removeContact already relies on.
return account.contacts.some(function(contact) {
return contact.accountId == contactId;
});
};
return {
findById: findById,
register: register,
forgotPassword: forgotPassword,
changePassword: changePassword,
login: login,
Account: Account,
findByString: findByString,
addContact: addContact,
removeContact: removeContact,
hasContact: hasContact
};
}; | } | random_line_split |
Account.js | module.exports = function(config, mongoose, nodemailer) {
var crypto = require('crypto');
var Status = new mongoose.Schema({
name: {
first: { type: String },
last: { type: String }
},
status: { type: String }
});
var Contact = new mongoose.Schema({
name: {
first: { type:String },
last: { type: String }
},
accountId: { type: mongoose.Schema.ObjectId },
added: { type: Date }, // When the contact was added
updated: { type: Date } // When the contact last updated
});
var AccountSchema = new mongoose.Schema({
email: { type: String, unique: true },
password: { type: String },
name: {
first: { type: String },
last: { type: String }
},
birthday: {
day: { type: Number, min: 1, max: 31, required: false },
month: { type: Number, min: 1, max: 12, required: false },
year: { type: Number }
},
contacts: [Contact],
photoUrl: { type: String },
biography: { type: String },
status: [Status], // My own status updates only
activity: [Status] // All status updates including friends
});
var Account = mongoose.model('Account', AccountSchema);
var registerCallback = function(err) {
if (err) |
return console.log('Account was created');
};
var changePassword = function(accountId, newpassword) {
var shaSum = crypto.createHash('sha256');
shaSum.update(newpassword);
var hashedPassword = shaSum.digest('hex');
Account.update({_id:accountId},
{
$set: { password : hashedPassword }
},
{
upsert:false
},
function changePasswordCallback(err) {
console.log('Change password done for account ' + accountId);
});
};
var forgotPassword = function(email, resetPasswordUrl, callback) {
var user = Account.findOne({email: email},
function findAccount(err, doc){
if (err) {
// Email address is not a valid user
callback(false);
} else {
var smtpTransport = nodemailer.createTransport('SMTP',
config.mail);
resetPasswordUrl += '?account=' + doc._id;
smtpTransport.sendMail({
from: '[email protected]',
to: doc.email,
subject: 'SocialNet Password Request',
text: 'Click here to reset your password: ' +
resetPasswordUrl
}, function forgotPasswordResult(err) {
if (err) {
callback(false);
} else {
callback(true);
}
});
}
});
};
var login = function(email, password, callback) {
var shaSum = crypto.createHash('sha256');
shaSum.update(password);
Account.findOne({
email:email,
password:shaSum.digest('hex')
},function(err,doc){
callback(doc);
});
};
var findById = function(accountId, callback) {
console.log('findById: Finding ' + accountId);
Account.findOne({_id: accountId}, function(err,doc) {
if(err) {
throw new Error('Error occured: ' + err);
}
if (doc) {
callback(doc);
} else {
console.log('Account.findOne: no document found');
}
});
};
var register = function(email, password, firstName, lastName) {
var shaSum = crypto.createHash('sha256');
shaSum.update(password);
console.log('Registering ' + email);
var user = new Account({
email: email,
name: {
first: firstName,
last: lastName
},
password: shaSum.digest('hex')
});
user.save(registerCallback);
console.log('Save command was sent');
};
var findByString = function(searchStr, callback) {
var searchRegex = new RegExp(searchStr, 'i');
Account.find({
$or: [
{ 'name.full': { $regex: searchRegex } },
{ email: { $regex: searchRegex } }
]
}, callback);
};
var addContact = function(account, addcontact) {
var contact = {
name: {
// Instead of { name: account.name } because of some bug
first: addcontact.name.first,
last: addcontact.name.last
},
accountId: addcontact._id,
added: new Date(),
updated: new Date()
};
var arr = account.contacts;
arr.push(contact);
account.update({}, {$set: {contacts: arr}}, function () {
// console.log(account.contacts);
account.save(function (err) {
if (err) {
console.log('Error saving account: ' + err);
}
});
});
};
var removeContact = function(account, contactId) {
if ( null === account.contacts ) {
console.log('No contacts in the account');
return;
}
console.log('Finding and deleting: ' + contactId);
account.contacts.forEach(function(contact) {
console.log('checking against: ' + contact.accountId);
if ( contact.accountId == contactId ) {
console.log('Match found and deleted');
account.contacts.remove(contact);
// // Find and remove item from an array
// var i = account.contacts.indexOf(contact);
// if(i != -1) {
// array.splice(i, 1);
// }
}
});
account.save(function (err, doc, noAffected) {
// console.log('updated rec saved');
// console.log(doc)
// console.log('EODoc');
});
};
var hasContact = function(account, contactId) {
if ( null === account.contacts ) {
return false;
}
// Use some() rather than forEach(): a return inside a forEach callback
// is discarded, so the old loop always fell through to false. Loose
// equality (==) coerces a Mongo ObjectId to a string, matching the
// comparison removeContact already relies on.
return account.contacts.some(function(contact) {
return contact.accountId == contactId;
});
};
return {
findById: findById,
register: register,
forgotPassword: forgotPassword,
changePassword: changePassword,
login: login,
Account: Account,
findByString: findByString,
addContact: addContact,
removeContact: removeContact,
hasContact: hasContact
};
};
| {
return console.log(err);
} | conditional_block |
TestClass4.ts | import { ReducerOf } from '../../src/ReducerOf'
import { State } from '../helpers/State'
import { HandlerOf } from '../../src/HandlerOf'
import * as Constants from './constants'
import { Action, Store } from 'redux'
import { ActionsObservable } from 'redux-observable'
import 'rxjs/add/operator/map'
export const ACTION_TYPE1_FROM_BAR1 = 'ACTION_TYPE1_FROM_BAR1'
export const ACTION_TYPE1_FROM_BAR2 = 'ACTION_TYPE1_FROM_BAR2'
export class TestClass4 {
@ReducerOf([Constants.ACTION_TYPE1])
foo1(state: Array<string> = [], action: Action) {
return [...state, 'foo1']
}
foo2(state: Array<string> = [], action: Action) {
return [...state, 'foo2']
}
@ReducerOf([Constants.ACTION_TYPE1])
foo3(state: Array<string> = [], action: Action) {
return [...state, 'foo3']
}
@HandlerOf([Constants.ACTION_TYPE1])
bar1(action: Action, store: Store<State>) {
return { type: ACTION_TYPE1_FROM_BAR1 }
}
@HandlerOf([Constants.ACTION_TYPE1], true)
bar2(action$: ActionsObservable<Action>, store: Store<State>) |
} | {
return action$.map(action => { return { type: ACTION_TYPE1_FROM_BAR2 } })
} | identifier_body |
TestClass4.ts | import { ReducerOf } from '../../src/ReducerOf'
import { State } from '../helpers/State'
import { HandlerOf } from '../../src/HandlerOf'
import * as Constants from './constants'
import { Action, Store } from 'redux'
import { ActionsObservable } from 'redux-observable'
import 'rxjs/add/operator/map'
export const ACTION_TYPE1_FROM_BAR1 = 'ACTION_TYPE1_FROM_BAR1'
export const ACTION_TYPE1_FROM_BAR2 = 'ACTION_TYPE1_FROM_BAR2'
export class TestClass4 {
@ReducerOf([Constants.ACTION_TYPE1])
| (state: Array<string> = [], action: Action) {
return [...state, 'foo1']
}
foo2(state: Array<string> = [], action: Action) {
return [...state, 'foo2']
}
@ReducerOf([Constants.ACTION_TYPE1])
foo3(state: Array<string> = [], action: Action) {
return [...state, 'foo3']
}
@HandlerOf([Constants.ACTION_TYPE1])
bar1(action: Action, store: Store<State>) {
return { type: ACTION_TYPE1_FROM_BAR1 }
}
@HandlerOf([Constants.ACTION_TYPE1], true)
bar2(action$: ActionsObservable<Action>, store: Store<State>) {
return action$.map(action => { return { type: ACTION_TYPE1_FROM_BAR2 } })
}
} | foo1 | identifier_name |
TestClass4.ts | import { ReducerOf } from '../../src/ReducerOf'
import { State } from '../helpers/State'
import { HandlerOf } from '../../src/HandlerOf'
import * as Constants from './constants'
import { Action, Store } from 'redux'
import { ActionsObservable } from 'redux-observable'
import 'rxjs/add/operator/map'
export const ACTION_TYPE1_FROM_BAR1 = 'ACTION_TYPE1_FROM_BAR1'
export const ACTION_TYPE1_FROM_BAR2 = 'ACTION_TYPE1_FROM_BAR2'
export class TestClass4 {
@ReducerOf([Constants.ACTION_TYPE1])
foo1(state: Array<string> = [], action: Action) {
return [...state, 'foo1']
}
foo2(state: Array<string> = [], action: Action) {
return [...state, 'foo2']
}
@ReducerOf([Constants.ACTION_TYPE1])
foo3(state: Array<string> = [], action: Action) {
return [...state, 'foo3']
}
@HandlerOf([Constants.ACTION_TYPE1])
bar1(action: Action, store: Store<State>) {
return { type: ACTION_TYPE1_FROM_BAR1 }
}
@HandlerOf([Constants.ACTION_TYPE1], true)
bar2(action$: ActionsObservable<Action>, store: Store<State>) { |
} | return action$.map(action => { return { type: ACTION_TYPE1_FROM_BAR2 } })
}
| random_line_split |
constants.py | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants as p_const
# Special vlan_tci value indicating flat network
FLAT_VLAN_TCI = '0x0000/0x1fff'
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Name prefixes for veth device or patch port pair linking the integration
# bridge with the physical bridge for a physical network
PEER_INTEGRATION_PREFIX = 'int-'
PEER_PHYSICAL_PREFIX = 'phy-'
# Nonexistent peer used to create patch ports without associating them, it
# allows to define flows before association
NONEXISTENT_PEER = 'nonexistent-peer'
# The different types of tunnels
TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN,
p_const.TYPE_GENEVE]
# --- OpenFlow table IDs
# --- Integration bridge (int_br)
LOCAL_SWITCHING = 0
# The physical network types that support DVR routers
DVR_PHYSICAL_NETWORK_TYPES = [p_const.TYPE_VLAN, p_const.TYPE_FLAT]
# Various tables for DVR use of integration bridge flows
DVR_TO_SRC_MAC = 1
DVR_TO_SRC_MAC_PHYSICAL = 2
ARP_DVR_MAC_TO_DST_MAC = 3
ARP_DVR_MAC_TO_DST_MAC_PHYSICAL = 4
CANARY_TABLE = 23
# Table for ARP poison/spoofing prevention rules
ARP_SPOOF_TABLE = 24
# Table for MAC spoof filtering
MAC_SPOOF_TABLE = 25
LOCAL_EGRESS_TABLE = 30
LOCAL_IP_TABLE = 31
# packet rate limit table
PACKET_RATE_LIMIT = 59
# Table to decide whether further filtering is needed
TRANSIENT_TABLE = 60
LOCAL_MAC_DIRECT = 61
TRANSIENT_EGRESS_TABLE = 62
# Table for DHCP
DHCP_IPV4_TABLE = 77
DHCP_IPV6_TABLE = 78
# Tables used for ovs firewall
BASE_EGRESS_TABLE = 71
RULES_EGRESS_TABLE = 72
ACCEPT_OR_INGRESS_TABLE = 73
BASE_INGRESS_TABLE = 81
RULES_INGRESS_TABLE = 82
OVS_FIREWALL_TABLES = (
BASE_EGRESS_TABLE,
RULES_EGRESS_TABLE,
ACCEPT_OR_INGRESS_TABLE,
BASE_INGRESS_TABLE,
RULES_INGRESS_TABLE,
)
# Tables for parties interacting with ovs firewall
ACCEPTED_EGRESS_TRAFFIC_TABLE = 91
ACCEPTED_INGRESS_TRAFFIC_TABLE = 92
DROPPED_TRAFFIC_TABLE = 93
ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE = 94
INT_BR_ALL_TABLES = (
LOCAL_SWITCHING,
DVR_TO_SRC_MAC,
DVR_TO_SRC_MAC_PHYSICAL,
CANARY_TABLE,
ARP_SPOOF_TABLE,
MAC_SPOOF_TABLE,
LOCAL_MAC_DIRECT,
LOCAL_EGRESS_TABLE,
LOCAL_IP_TABLE,
PACKET_RATE_LIMIT,
TRANSIENT_TABLE,
TRANSIENT_EGRESS_TABLE,
BASE_EGRESS_TABLE,
RULES_EGRESS_TABLE,
ACCEPT_OR_INGRESS_TABLE,
DHCP_IPV4_TABLE,
DHCP_IPV6_TABLE,
BASE_INGRESS_TABLE,
RULES_INGRESS_TABLE,
ACCEPTED_EGRESS_TRAFFIC_TABLE,
ACCEPTED_INGRESS_TRAFFIC_TABLE,
DROPPED_TRAFFIC_TABLE)
# --- Tunnel bridge (tun_br)
# Various tables for tunneling flows
DVR_PROCESS = 1
PATCH_LV_TO_TUN = 2
GRE_TUN_TO_LV = 3
VXLAN_TUN_TO_LV = 4
GENEVE_TUN_TO_LV = 6
DVR_NOT_LEARN = 9
LEARN_FROM_TUN = 10
UCAST_TO_TUN = 20
ARP_RESPONDER = 21
FLOOD_TO_TUN = 22
# NOTE(vsaienko): transit table used by networking-bagpipe driver to
# mirror traffic to EVPN and standard tunnels to gateway nodes
BAGPIPE_FLOOD_TO_TUN_BROADCAST = 222
TUN_BR_ALL_TABLES = (
LOCAL_SWITCHING,
DVR_PROCESS,
PATCH_LV_TO_TUN,
GRE_TUN_TO_LV,
VXLAN_TUN_TO_LV,
GENEVE_TUN_TO_LV,
DVR_NOT_LEARN,
LEARN_FROM_TUN,
UCAST_TO_TUN,
ARP_RESPONDER,
FLOOD_TO_TUN)
# --- Physical Bridges (phys_brs)
# Various tables for DVR use of physical bridge flows
DVR_PROCESS_PHYSICAL = 1
LOCAL_VLAN_TRANSLATION = 2
DVR_NOT_LEARN_PHYSICAL = 3
PHY_BR_ALL_TABLES = (
LOCAL_SWITCHING,
DVR_PROCESS_PHYSICAL,
LOCAL_VLAN_TRANSLATION,
DVR_NOT_LEARN_PHYSICAL)
# --- end of OpenFlow table IDs
# type for ARP reply in ARP header
ARP_REPLY = '0x2'
# Map tunnel types to table numbers
TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV,
p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV,
p_const.TYPE_GENEVE: GENEVE_TUN_TO_LV}
# The default respawn interval for the ovsdb monitor
DEFAULT_OVSDBMON_RESPAWN = 30
# Represent invalid OF Port
OFPORT_INVALID = -1
ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
'mod_dl_src:%(mac)s,'
'load:0x2->NXM_OF_ARP_OP[],'
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
'load:%(ip)#x->NXM_OF_ARP_SPA[],'
'in_port')
# Represent ovs status
OVS_RESTARTED = 0
OVS_NORMAL = 1
OVS_DEAD = 2
EXTENSION_DRIVER_TYPE = 'ovs'
# ovs datapath types
OVS_DATAPATH_SYSTEM = 'system'
OVS_DATAPATH_NETDEV = 'netdev'
OVS_DPDK_VHOST_USER = 'dpdkvhostuser'
OVS_DPDK_VHOST_USER_CLIENT = 'dpdkvhostuserclient'
OVS_DPDK_PORT_TYPES = [OVS_DPDK_VHOST_USER, OVS_DPDK_VHOST_USER_CLIENT]
# default ovs vhost-user socket location
VHOST_USER_SOCKET_DIR = '/var/run/openvswitch'
MAX_DEVICE_RETRIES = 5
# OpenFlow version constants
OPENFLOW10 = "OpenFlow10"
OPENFLOW11 = "OpenFlow11"
OPENFLOW12 = "OpenFlow12"
OPENFLOW13 = "OpenFlow13"
OPENFLOW14 = "OpenFlow14"
OPENFLOW15 = "OpenFlow15"
OPENFLOW_MAX_PRIORITY = 65535
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
# callback resource for setting 'bridge_name' in the 'binding:vif_details'
OVS_BRIDGE_NAME = 'ovs_bridge_name'
# callback resource for notifying the ovsdb handler
OVSDB_RESOURCE = 'ovsdb' | # ovs_cleanup script is used.
SKIP_CLEANUP = 'skip_cleanup' |
# Used in ovs port 'external_ids' in order to mark it for no cleanup when | random_line_split
findMainFile.js | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
'use strict';
const glob = require('glob');
const path = require('path');
/**
* Find the main file for the C# project
*
* @param {String} folder Name of the folder to search in
* @return {String}
*/
module.exports = function findMainFile(folder) {
let mainFilePath = glob.sync('MainReactNativeHost.cs', {
cwd: folder,
ignore: ['node_modules/**', '**/build/**', 'Examples/**', 'examples/**'],
});
if (mainFilePath.length === 0) |
return mainFilePath && mainFilePath.length > 0 ? path.join(folder, mainFilePath[0]) : null;
};
| {
mainFilePath = glob.sync('MainPage.cs', {
cwd: folder,
ignore: ['node_modules/**', '**/build/**', 'Examples/**', 'examples/**'],
});
} | conditional_block |
findMainFile.js | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
'use strict';
const glob = require('glob');
const path = require('path');
/**
* Find the main file for the C# project
*
* @param {String} folder Name of the folder to search in
* @return {String}
*/
module.exports = function findMainFile(folder) {
let mainFilePath = glob.sync('MainReactNativeHost.cs', {
cwd: folder,
ignore: ['node_modules/**', '**/build/**', 'Examples/**', 'examples/**'],
}); | ignore: ['node_modules/**', '**/build/**', 'Examples/**', 'examples/**'],
});
}
return mainFilePath && mainFilePath.length > 0 ? path.join(folder, mainFilePath[0]) : null;
}; |
if (mainFilePath.length === 0) {
mainFilePath = glob.sync('MainPage.cs', {
cwd: folder, | random_line_split |
Flickr.js | /* ************************************************************************
qooxdoo - the new era of web development
http://qooxdoo.org
Copyright:
2004-2009 1&1 Internet AG, Germany, http://www.1und1.de
License:
LGPL: http://www.gnu.org/licenses/lgpl.html
EPL: http://www.eclipse.org/org/documents/epl-v10.php
See the LICENSE file in the project's top-level directory for details.
Authors:
* Martin Wittemann (martinwittemann)
************************************************************************ */
/* ************************************************************************
************************************************************************ */
/**
* @ignore(demobrowser.demo.data.store.Flickr)
* @ignore(demobrowser.demo.data.store)
* @tag databinding
* @tag showcase
*/
qx.Class.define("demobrowser.demo.data.Flickr",
{
extend : qx.application.Standalone,
members :
{
main: function()
{
this.base(arguments);
// fetch some data from Flickr
var store = new demobrowser.demo.data.store.Flickr("qooxdoo");
/* ***********************************************
* CONTROLS
* ********************************************* */
var search = new qx.ui.form.TextField("qooxdoo");
this.getRoot().add(search, {left: 30, top: 50});
var searchGo = new qx.ui.form.Button("Search");
this.getRoot().add(searchGo, {left: 140, top: 49});
searchGo.addListener("execute", function() {
store.searchForTag(search.getValue());
}, this);
search.addListener("keydown", function(e) {
if (e.getKeyIdentifier() == "Enter") {
store.searchForTag(search.getValue());
}
}, this);
/* ***********************************************
* STATUS
* ********************************************* */
var status = new qx.ui.basic.Label("loading");
this.getRoot().add(status, {left: 210, top: 52}); | /* ***********************************************
* LIST OF PHOTOS
* ********************************************* */
var list = new qx.ui.form.List();
list.setWidth(700);
list.setHeight(110);
list.setOrientation("horizontal");
this.getRoot().add(list, {left: 30, right: 30, top: 80});
var controller = new qx.data.controller.List(null, list);
controller.setLabelPath("title");
controller.setDelegate({configureItem : function(item) {
item.setShow("icon");
}});
var iconOptions = {converter : function(data, model) {
return ("http://farm" + model.getFarm() + ".static.flickr.com/" + model.getServer() + "/"
+ data + "_" + model.getSecret() + "_s.jpg");
}};
controller.setIconOptions(iconOptions);
controller.setIconPath("id");
store.bind("model.photos.photo", controller, "model");
/* ***********************************************
* DETAIL VIEW
* ********************************************* */
var image = new qx.ui.basic.Image();
this.getRoot().add(image, {left: 30, top: 200});
var detailOptions = {converter : function(data) {
if (data) {
return ("http://farm" + data.getFarm() + ".static.flickr.com/" + data.getServer() + "/"
+ data.getId() + "_" + data.getSecret() + ".jpg");
}
return "";
}};
controller.bind("selection[0]", image, "source", detailOptions);
/* ***********************************************
* HEADLINE
* ********************************************* */
var headline = new qx.ui.basic.Label();
headline.setRich(true);
headline.setWidth(260);
headline.setValue(
"<span style='font-size: 20px'>Flickr</span>"
);
this.getRoot().add(headline, {left: 10, top: 10});
}
}
});
/*
* PLEASE NOTE:
* For demonstration purposes the following class is added to the same file as
* the application class. For a regular qooxdoo application each class must live
* in a file of its own. You may neglect any warnings when generating this demo.
*/
qx.Class.define("demobrowser.demo.data.store.Flickr",
{
extend : qx.data.store.Jsonp,
construct : function(tag)
{
this.setCallbackName("jsonFlickrApi");
this.base(arguments, this.__generateUrl(tag));
},
members :
{
searchForTag: function(tag) {
if (tag != "") {
this.setUrl(this.__generateUrl(tag));
}
},
__generateUrl: function(tag) {
if (!tag) {
return;
}
var url = "http://api.flickr.com/services/rest/?tags=" + tag;
url += "&method=flickr.photos.search&api_key=63a8042eead205f7e0040f488c02afd9&format=json";
return url;
}
}
}); | store.bind("state", status, "value");
| random_line_split |
Flickr.js | /* ************************************************************************
qooxdoo - the new era of web development
http://qooxdoo.org
Copyright:
2004-2009 1&1 Internet AG, Germany, http://www.1und1.de
License:
LGPL: http://www.gnu.org/licenses/lgpl.html
EPL: http://www.eclipse.org/org/documents/epl-v10.php
See the LICENSE file in the project's top-level directory for details.
Authors:
* Martin Wittemann (martinwittemann)
************************************************************************ */
/* ************************************************************************
************************************************************************ */
/**
* @ignore(demobrowser.demo.data.store.Flickr)
* @ignore(demobrowser.demo.data.store)
* @tag databinding
* @tag showcase
*/
qx.Class.define("demobrowser.demo.data.Flickr",
{
extend : qx.application.Standalone,
members :
{
main: function()
{
this.base(arguments);
// fetch some data from Flickr
var store = new demobrowser.demo.data.store.Flickr("qooxdoo");
/* ***********************************************
* CONTROLS
* ********************************************* */
var search = new qx.ui.form.TextField("qooxdoo");
this.getRoot().add(search, {left: 30, top: 50});
var searchGo = new qx.ui.form.Button("Search");
this.getRoot().add(searchGo, {left: 140, top: 49});
searchGo.addListener("execute", function() {
store.searchForTag(search.getValue());
}, this);
search.addListener("keydown", function(e) {
if (e.getKeyIdentifier() == "Enter") {
store.searchForTag(search.getValue());
}
}, this);
/* ***********************************************
* STATUS
* ********************************************* */
var status = new qx.ui.basic.Label("loading");
this.getRoot().add(status, {left: 210, top: 52});
store.bind("state", status, "value");
/* ***********************************************
* LIST OF PHOTOS
* ********************************************* */
var list = new qx.ui.form.List();
list.setWidth(700);
list.setHeight(110);
list.setOrientation("horizontal");
this.getRoot().add(list, {left: 30, right: 30, top: 80});
var controller = new qx.data.controller.List(null, list);
controller.setLabelPath("title");
controller.setDelegate({configureItem : function(item) {
item.setShow("icon");
}});
var iconOptions = {converter : function(data, model) {
return ("http://farm" + model.getFarm() + ".static.flickr.com/" + model.getServer() + "/"
+ data + "_" + model.getSecret() + "_s.jpg");
}};
controller.setIconOptions(iconOptions);
controller.setIconPath("id");
store.bind("model.photos.photo", controller, "model");
/* ***********************************************
* DETAIL VIEW
* ********************************************* */
var image = new qx.ui.basic.Image();
this.getRoot().add(image, {left: 30, top: 200});
var detailOptions = {converter : function(data) {
if (data) {
return ("http://farm" + data.getFarm() + ".static.flickr.com/" + data.getServer() + "/"
+ data.getId() + "_" + data.getSecret() + ".jpg");
}
return "";
}};
controller.bind("selection[0]", image, "source", detailOptions);
/* ***********************************************
* HEADLINE
* ********************************************* */
var headline = new qx.ui.basic.Label();
headline.setRich(true);
headline.setWidth(260);
headline.setValue(
"<span style='font-size: 20px'>Flickr</span>"
);
this.getRoot().add(headline, {left: 10, top: 10});
}
}
});
/*
* PLEASE NOTE:
* For demonstration purposes the following class is added to the same file as
* the application class. For a regular qooxdoo application each class must live
* in a file of its own. You may neglect any warnings when generating this demo.
*/
qx.Class.define("demobrowser.demo.data.store.Flickr",
{
extend : qx.data.store.Jsonp,
construct : function(tag)
{
this.setCallbackName("jsonFlickrApi");
this.base(arguments, this.__generateUrl(tag));
},
members :
{
searchForTag: function(tag) {
if (tag != "") |
},
__generateUrl: function(tag) {
if (!tag) {
return;
}
var url = "http://api.flickr.com/services/rest/?tags=" + tag;
url += "&method=flickr.photos.search&api_key=63a8042eead205f7e0040f488c02afd9&format=json";
return url;
}
}
});
| {
this.setUrl(this.__generateUrl(tag));
} | conditional_block |
fontfamilycelleditor.js | /**
*
*/
;
(function ($, window, document, undefined) {
var pluginName = "fontFamilyCellEditor";
var defaults = {
parent: "",
msg: "",
};
var MbSelectCellEditor = function (element, options) {
this.element = element;
this.options = $.extend({
parent: "",
msg: "",
}, defaults, options);
this._defaults = defaults;
this._name = pluginName;
};
MbSelectCellEditor.prototype.loadEditor = function (tag, entity, prop, isnull, owner) {
var that = this;
if ($(tag).attr("type") == "h") {
return;
}
var oldvalue = $(tag).html();
$(tag).html("");
var selection = $("<select name='selection' class='form-control'/></select>");
selection.appendTo($(tag));
if (isnull == "y") {
$("<option value='-1'>- 请选择 -</option>").appendTo($(selection));
}
for (x in fontfamilies) {
$("<option value='" + x + "'>" + fontfamilies[x] + "</option>").appendTo($(selection));
}
var nextTR = selection.closest('tr').next();
while (nextTR != undefined) {
var nextTD = nextTR.find("td")[1];
if (nextTD != undefined) {
if (nextTD.getAttribute("type") != "-1") {
break;
}
} else {
break;
}
nextTR = $(nextTR).next();
}
selection.keydown(function (event) {
var keyvalue = event.which;
if (keyvalue == 9) { // Tab key
event.preventDefault();
if (that.changeValue(tag, entity, prop, oldvalue, isnull, owner)) {
selection.remove();
if (nextTD != undefined) {
that.options.parent.startToEdit(nextTD);
}
} else {
selection.focus();
return false;
}
}
}).keyup(function (event) {
var keyvalue = event.which;
if (keyvalue == 13) {// enter key
event.preventDefault();
if (that.changeValue(tag, entity, prop, oldvalue, isnull, owner)) {
selection.remove();
if (nextTD != undefined) {
that.options.parent.startToEdit(nextTD);
}
} else {
selection.focus();
return false;
}
} else if (keyvalue == 27) {
$(tag).html(oldvalue);
}
}).blur(function () {
if (that.changeValue(tag, entity, prop, oldvalue, isnull, owner)) {
| {
selection.focus();
return false;
}
})
};
MbSelectCellEditor.prototype.changeValue = function (tag, entity, prop,
oldvalue, isnull, owner) {
var newvalue = $(tag).children("select").val();
if (isnull == "n" && (newvalue == null || newvalue == "-1")) {
this.options.msg.show("当前属性不能为空。");
return false;
} else {
if (newvalue != oldvalue) {
for (x in entity) {
if (x == prop) {
map[owner].stack
.execute(new OMMbPropChangedCmd(entity,
prop, newvalue, owner));
break;
}
}
} else {
$(tag).html(oldvalue);
}
return true;
}
};
$.fn[pluginName] = function (options) {
return this.each(function () {
if (!$.data(this, pluginName)) {
$.data(this, pluginName, new MbSelectCellEditor(this, options));
} else if ($.isFunction(MbSelectCellEditor.prototype[options])) { // "Plugin" was never defined in this scope
$.data(this, pluginName)[options]();
}
});
};
})(jQuery, window, document); | selection.remove();
} else | conditional_block |
fontfamilycelleditor.js | /**
*
*/
;
(function ($, window, document, undefined) {
var pluginName = "fontFamilyCellEditor";
var defaults = {
parent: "",
msg: "",
};
var MbSelectCellEditor = function (element, options) {
this.element = element;
this.options = $.extend({
parent: "",
msg: "",
}, defaults, options);
this._defaults = defaults;
this._name = pluginName;
};
MbSelectCellEditor.prototype.loadEditor = function (tag, entity, prop, isnull, owner) {
var that = this;
if ($(tag).attr("type") == "h") {
return;
}
var oldvalue = $(tag).html();
$(tag).html("");
var selection = $("<select name='selection' class='form-control'/></select>");
selection.appendTo($(tag));
if (isnull == "y") {
$("<option value='-1'>- 请选择 -</option>").appendTo($(selection));
}
for (x in fontfamilies) {
$("<option value='" + x + "'>" + fontfamilies[x] + "</option>").appendTo($(selection));
}
var nextTR = selection.closest('tr').next();
while (nextTR != undefined) {
var nextTD = nextTR.find("td")[1];
if (nextTD != undefined) {
if (nextTD.getAttribute("type") != "-1") {
break;
}
} else {
break;
}
nextTR = $(nextTR).next();
}
selection.keydown(function (event) {
var keyvalue = event.which;
if (keyvalue == 9) { // Tab key
event.preventDefault();
if (that.changeValue(tag, entity, prop, oldvalue, isnull, owner)) {
selection.remove();
if (nextTD != undefined) {
that.options.parent.startToEdit(nextTD);
}
} else {
selection.focus();
return false;
}
}
}).keyup(function (event) {
var keyvalue = event.which;
if (keyvalue == 13) {// enter key
event.preventDefault();
if (that.changeValue(tag, entity, prop, oldvalue, isnull, owner)) {
selection.remove();
if (nextTD != undefined) {
that.options.parent.startToEdit(nextTD);
}
} else {
selection.focus();
return false;
}
} else if (keyvalue == 27) {
$(tag).html(oldvalue);
}
}).blur(function () {
if (that.changeValue(tag, entity, prop, oldvalue, isnull, owner)) {
selection.remove();
} else {
selection.focus();
return false;
}
})
};
MbSelectCellEditor.prototype.changeValue = function (tag, entity, prop,
oldvalue, isnull, owner) {
var newvalue = $(tag).children("select").val();
if (isnull == "n" && (newvalue == null || newvalue == "-1")) {
this.options.msg.show("当前属性不能为空。");
return false;
} else {
if (newvalue != oldvalue) {
for (x in entity) {
if (x == prop) {
map[owner].stack
.execute(new OMMbPropChangedCmd(entity,
prop, newvalue, owner));
break;
}
}
} else {
$(tag).html(oldvalue);
}
return true;
}
};
$.fn[pluginName] = function (options) {
return this.each(function () {
if (!$.data(this, pluginName)) {
$.data(this, pluginName, new MbSelectCellEditor(this, options));
} else if ($.isFunction(Plugin.prototype[options])) { |
})(jQuery, window, document); | $.data(this, pluginName)[options]();
}
});
}; | random_line_split |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn | (&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else {
debug!("fast path failed, falling back to a git fetch");
}
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
}
| repo | identifier_name |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> |
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
}
| {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else {
debug!("fast path failed, falling back to a git fetch");
}
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
} | identifier_body |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration. |
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else {
debug!("fast path failed, falling back to a git fetch");
}
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
} | ops::http_handle(self.config)?; | random_line_split |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else |
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code != 200 && code != 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex() != checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
}
| {
debug!("fast path failed, falling back to a git fetch");
} | conditional_block |
covidien_common.js | var filter_specialChars = "^[a-zA-Z0-9-_@\. ]+$";
var filter_specialChars_date = "^[0-9\/ ]+$";
$(document).ready(function(){
// For Menu Highlight - Covidien Brackets
var identifier = window.location.pathname;
var split = identifier.split("/");
var vTitle = $(this).attr('title').split("|");
function | ($ustring){
return $.inArray($ustring, split)> -1;
}
if (vTitle[0]=="User Settings ") {
//anch_user_settings
$('#anch_user_settings').after("<span class='T10C11'> ]</span>");
$('#anch_user_settings').before("<span class='T10C11'>[ </span>");
}
else if (check_url('node') && check_url('edit') && vTitle[0]!="User Settings ") {
//For edit catalogs
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('home')) {
//anch_home
$('#content-part').attr('style','border:0');
$('#anch_home').attr('class','active');
$('#anch_home').after("<span class='T10C2'> ]</span>");
$('#anch_home').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('device') || check_url('devices')) {
//anch_devices
if(check_url('device')) { $('#content-part').attr('style','border:0'); }
$('#anch_devices').attr('class','active');
$('#anch_devices').after("<span class='T10C2'> ]</span>");
$('#anch_devices').before("<span class='T10C2'>[ </span>");
}
else if (check_url('reports') || check_url('report') ) {
//anch_devices
$('#anch_reports').attr('class','active');
$('#anch_reports').after("<span class='T10C2'> ]</span>");
$('#anch_reports').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') || check_url('activity') || check_url('add') || check_url('add_new') || (check_url('node') && check_url('add'))){
//anch_system_admin
$('.manage_role').attr('style','border:0');
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
// Block enter Key for default submit
$("form input[type='text'],form select,form input[type='button']").bind("keypress", function(e) {
if (e.keyCode == 13) return false;
});
// Block Special characters in titles
$('.oval_search_wraper input').bind('keypress', function (event) {
var regex = new RegExp(filter_specialChars);
var key = String.fromCharCode(!event.charCode ? event.which : event.charCode);
if (!regex.test(key)) {
event.preventDefault();
return false;
}
});
// Login Screen Changes
// For Password Text - !IE
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
$('#fpage #edit-pass-clear').focus(function() {
$('#fpage #edit-pass-clear').hide();
$('#fpage #edit-pass').show();
$('#fpage #edit-pass').focus();
});
$('#fpage #edit-pass').blur(function() {
if($('#fpage #edit-pass').val() == '') {
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
}
});
$('input[type="text"]').each(function(){
if(this.value.indexOf('Username ') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
// For Role Popup
$('input[type="text"]').each(function(){
if(this.value.indexOf('Enter role') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
$("select option").each(function(i){
this.title = this.text;
});
});
| check_url | identifier_name |
covidien_common.js | var filter_specialChars = "^[a-zA-Z0-9-_@\. ]+$";
var filter_specialChars_date = "^[0-9\/ ]+$";
$(document).ready(function(){
// For Menu Highlight - Covidien Brackets
var identifier = window.location.pathname;
var split = identifier.split("/");
var vTitle = $(this).attr('title').split("|");
function check_url($ustring){
return $.inArray($ustring, split)> -1;
}
if (vTitle[0]=="User Settings ") {
//anch_user_settings
$('#anch_user_settings').after("<span class='T10C11'> ]</span>");
$('#anch_user_settings').before("<span class='T10C11'>[ </span>");
}
else if (check_url('node') && check_url('edit') && vTitle[0]!="User Settings ") {
//For edit catalogs
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('home')) {
//anch_home
$('#content-part').attr('style','border:0');
$('#anch_home').attr('class','active');
$('#anch_home').after("<span class='T10C2'> ]</span>");
$('#anch_home').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('device') || check_url('devices')) {
//anch_devices
if(check_url('device')) |
$('#anch_devices').attr('class','active');
$('#anch_devices').after("<span class='T10C2'> ]</span>");
$('#anch_devices').before("<span class='T10C2'>[ </span>");
}
else if (check_url('reports') || check_url('report') ) {
//anch_devices
$('#anch_reports').attr('class','active');
$('#anch_reports').after("<span class='T10C2'> ]</span>");
$('#anch_reports').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') || check_url('activity') || check_url('add') || check_url('add_new') || (check_url('node') && check_url('add'))){
//anch_system_admin
$('.manage_role').attr('style','border:0');
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
// Block enter Key for default submit
$("form input[type='text'],form select,form input[type='button']").bind("keypress", function(e) {
if (e.keyCode == 13) return false;
});
// Block Special characters in titles
$('.oval_search_wraper input').bind('keypress', function (event) {
var regex = new RegExp(filter_specialChars);
var key = String.fromCharCode(!event.charCode ? event.which : event.charCode);
if (!regex.test(key)) {
event.preventDefault();
return false;
}
});
// Login Screen Changes
// For Password Text - !IE
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
$('#fpage #edit-pass-clear').focus(function() {
$('#fpage #edit-pass-clear').hide();
$('#fpage #edit-pass').show();
$('#fpage #edit-pass').focus();
});
$('#fpage #edit-pass').blur(function() {
if($('#fpage #edit-pass').val() == '') {
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
}
});
$('input[type="text"]').each(function(){
if(this.value.indexOf('Username ') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
// For Role Popup
$('input[type="text"]').each(function(){
if(this.value.indexOf('Enter role') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
$("select option").each(function(i){
this.title = this.text;
});
});
| { $('#content-part').attr('style','border:0'); } | conditional_block |
covidien_common.js | var filter_specialChars = "^[a-zA-Z0-9-_@\. ]+$";
var filter_specialChars_date = "^[0-9\/ ]+$";
$(document).ready(function(){
// For Menu Highlight - Covidien Brackets
var identifier = window.location.pathname;
var split = identifier.split("/");
var vTitle = $(this).attr('title').split("|");
function check_url($ustring) |
if (vTitle[0]=="User Settings ") {
//anch_user_settings
$('#anch_user_settings').after("<span class='T10C11'> ]</span>");
$('#anch_user_settings').before("<span class='T10C11'>[ </span>");
}
else if (check_url('node') && check_url('edit') && vTitle[0]!="User Settings ") {
//For edit catalogs
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('home')) {
//anch_home
$('#content-part').attr('style','border:0');
$('#anch_home').attr('class','active');
$('#anch_home').after("<span class='T10C2'> ]</span>");
$('#anch_home').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('device') || check_url('devices')) {
//anch_devices
if(check_url('device')) { $('#content-part').attr('style','border:0'); }
$('#anch_devices').attr('class','active');
$('#anch_devices').after("<span class='T10C2'> ]</span>");
$('#anch_devices').before("<span class='T10C2'>[ </span>");
}
else if (check_url('reports') || check_url('report') ) {
//anch_devices
$('#anch_reports').attr('class','active');
$('#anch_reports').after("<span class='T10C2'> ]</span>");
$('#anch_reports').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') || check_url('activity') || check_url('add') || check_url('add_new') || (check_url('node') && check_url('add'))){
//anch_system_admin
$('.manage_role').attr('style','border:0');
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
// Block enter Key for default submit
$("form input[type='text'],form select,form input[type='button']").bind("keypress", function(e) {
if (e.keyCode == 13) return false;
});
// Block Special characters in titles
$('.oval_search_wraper input').bind('keypress', function (event) {
var regex = new RegExp(filter_specialChars);
var key = String.fromCharCode(!event.charCode ? event.which : event.charCode);
if (!regex.test(key)) {
event.preventDefault();
return false;
}
});
// Login Screen Changes
// For Password Text - !IE
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
$('#fpage #edit-pass-clear').focus(function() {
$('#fpage #edit-pass-clear').hide();
$('#fpage #edit-pass').show();
$('#fpage #edit-pass').focus();
});
$('#fpage #edit-pass').blur(function() {
if($('#fpage #edit-pass').val() == '') {
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
}
});
$('input[type="text"]').each(function(){
if(this.value.indexOf('Username ') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
// For Role Popup
$('input[type="text"]').each(function(){
if(this.value.indexOf('Enter role') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
$("select option").each(function(i){
this.title = this.text;
});
});
| {
return $.inArray($ustring, split)> -1;
} | identifier_body |
covidien_common.js | var filter_specialChars = "^[a-zA-Z0-9-_@\. ]+$";
var filter_specialChars_date = "^[0-9\/ ]+$";
$(document).ready(function(){
// For Menu Highlight - Covidien Brackets
var identifier = window.location.pathname;
var split = identifier.split("/");
var vTitle = $(this).attr('title').split("|");
function check_url($ustring){
return $.inArray($ustring, split)> -1;
}
if (vTitle[0]=="User Settings ") {
//anch_user_settings
$('#anch_user_settings').after("<span class='T10C11'> ]</span>");
$('#anch_user_settings').before("<span class='T10C11'>[ </span>"); | $('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('home')) {
//anch_home
$('#content-part').attr('style','border:0');
$('#anch_home').attr('class','active');
$('#anch_home').after("<span class='T10C2'> ]</span>");
$('#anch_home').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') && check_url('device') || check_url('devices')) {
//anch_devices
if(check_url('device')) { $('#content-part').attr('style','border:0'); }
$('#anch_devices').attr('class','active');
$('#anch_devices').after("<span class='T10C2'> ]</span>");
$('#anch_devices').before("<span class='T10C2'>[ </span>");
}
else if (check_url('reports') || check_url('report') ) {
//anch_devices
$('#anch_reports').attr('class','active');
$('#anch_reports').after("<span class='T10C2'> ]</span>");
$('#anch_reports').before("<span class='T10C2'>[ </span>");
}
else if (check_url('covidien') || check_url('activity') || check_url('add') || check_url('add_new') || (check_url('node') && check_url('add'))){
//anch_system_admin
$('.manage_role').attr('style','border:0');
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>");
$('#anch_system_admin').before("<span class='T10C2'>[ </span>");
}
// Block enter Key for default submit
$("form input[type='text'],form select,form input[type='button']").bind("keypress", function(e) {
if (e.keyCode == 13) return false;
});
// Block Special characters in titles
$('.oval_search_wraper input').bind('keypress', function (event) {
var regex = new RegExp(filter_specialChars);
var key = String.fromCharCode(!event.charCode ? event.which : event.charCode);
if (!regex.test(key)) {
event.preventDefault();
return false;
}
});
// Login Screen Changes
// For Password Text - !IE
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
$('#fpage #edit-pass-clear').focus(function() {
$('#fpage #edit-pass-clear').hide();
$('#fpage #edit-pass').show();
$('#fpage #edit-pass').focus();
});
$('#fpage #edit-pass').blur(function() {
if($('#fpage #edit-pass').val() == '') {
$('#fpage #edit-pass-clear').show();
$('#fpage #edit-pass').hide();
}
});
$('input[type="text"]').each(function(){
if(this.value.indexOf('Username ') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
// For Role Popup
$('input[type="text"]').each(function(){
if(this.value.indexOf('Enter role') >= 0) {
$(this).attr("title", this.value);
this.value = $(this).attr('title');
$(this).addClass('text-label');
$(this).focus(function(){
if(this.value == $(this).attr('title')) {
this.value = '';
$(this).removeClass('text-label');
}
});
$(this).blur(function(){
if(this.value == '') {
this.value = $(this).attr('title');
$(this).addClass('text-label');
}
});
}
});
$("select option").each(function(i){
this.title = this.text;
});
}); | }
else if (check_url('node') && check_url('edit') && vTitle[0]!="User Settings ") {
//For edit catalogs
$('#anch_system_admin').attr('class','active');
$('#anch_system_admin').after("<span class='T10C2'> ]</span>"); | random_line_split |
test-define-put.js | var assert = require('chai').assert;
var user = require('../util/models/user');
var util = require('./rest-builder-util');
var putObj = {
key: 'value'
};
var putData = {
q: putObj
};
describe('RestBuilder', function() {
describe('definePut', function() {
util.setUp(); | describe('no processors', function() {
it('should call db.del', function(done) {
this.rb.definePut('users', user, [], []);
this.client.put('/users/1', putData, function(err, req, res) {
assert.ifError(err);
util.mockDB.update.assertCalledOnceWithArgsIncluding(
['1', user, putObj]
);
done();
});
});
});
describe('with preprocessors', function () {
it('should call a single preprocessor', function(done) {
var mockPre = util.getMockPre();
this.rb.definePut('users', user, [mockPre], []);
this.client.put('/users/1', putData, function(err, req, res) {
mockPre.assertCalled('should call preprocessors');
assert.ifError(err);
util.mockDB.update.assertCalledOnceWithArgsIncluding(
['1', user, putObj]
);
done();
});
});
it('should call all the preprocessors', function (done) {
var mockPre = util.getMockPre();
var _mockPre = util.getMockPre();
this.rb.definePut('users', user, [mockPre, _mockPre], []);
this.client.put('/users/1', putData, function(err, req, res) {
mockPre.assertCalled('should call first preprocessor');
_mockPre.assertCalled('should call second preprocessor');
assert.ifError(err);
util.mockDB.update.assertCalledOnceWithArgsIncluding(
['1', user, putObj]
);
done();
});
});
});
});
}); | random_line_split |
|
linedlg.py | # Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
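    # X11 dash lengths are single bytes and must be non-zero, so the scaled
    # values are clamped to 1..255; with no dash pattern, one on-segment
    # longer than the bitmap simulates a solid line.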
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
    else:
        dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
|
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
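    # which == 1 draws the arrow for the start of the line, mirrored about
    # the y axis; which == 2 (the default) draws the end-of-line arrow.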
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def create_arrow_images(tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
self.var_width.set(style.line_width)
self.init_dash(style)
self.init_arrow(style)
else:
self.var_color_none.set(1)
def init_from_doc(self):
self.Update()
def Update(self):
if self.document.HasSelection():
properties = self.document.CurrentProperties()
self.init_from_style(properties)
def do_apply(self):
kw = {}
if not self.var_color_none.get():
color = self.color_but.Color()
kw["line_pattern"] = SolidPattern(color)
kw["line_width"] = self.var_width.get()
kw["line_join"] = self.opt_join.GetValue()
kw["line_cap"] = self.opt_cap.GetValue()
kw["line_dashes"] = self.opt_dash.GetValue()
kw["line_arrow1"] = self.opt_arrow1.GetValue()
kw["line_arrow2"] = self.opt_arrow2.GetValue()
else:
kw["line_pattern"] = EmptyPattern
self.set_properties(_("Set Outline"), 'line', kw)
def set_line_join(self, *args):
self.document.SetProperties(line_join = self.opt_join.GetValue(),
if_type_present = 1)
def set_line_cap(self, *args):
self.document.SetProperties(line_cap = self.opt_cap.GetValue(),
if_type_present = 1)
def set_line_color(self):
self.document.SetLineColor(self.color_but.Color())
def set_line_width(self, *rest):
self.document.SetProperties(line_width = self.var_width.get(),
if_type_present = 1)
def set_dash(self, *args):
self.document.SetProperties(line_dashes = self.opt_dash.GetValue(),
if_type_present = 1)
def init_dash(self, style):
dashes = style.line_dashes
draw_dash_bitmap(self.dash_gc, dashes)
dash_image = create_bitmap_image(self.top.tk, 'dash_image',
self.dash_bitmap)
self.opt_dash.SetValue(dashes, dash_image)
def set_arrow(self, arrow, which):
if which == 1:
self.document.SetProperties(line_arrow1 = arrow,
if_type_present = 1)
else:
self.document.SetProperties(line_arrow2 = arrow,
if_type_present = 1)
def init_arrow(self, style):
arrow = style.line_arrow1
draw_arrow_bitmap(self.arrow_gc, arrow, 1)
arrow_image = create_bitmap_image(self.top.tk, 'arrow1_image',
self.arrow_bitmap)
self.opt_arrow1.SetValue(arrow, arrow_image)
arrow = style.line_arrow2
draw_arrow_bitmap(self.arrow_gc, arrow, 2)
arrow_image = create_bitmap_image(self.top.tk, 'arrow2_image',
self.arrow_bitmap)
self.opt_arrow2.SetValue(arrow, arrow_image)
def update_from_object_cb(self, obj):
if obj is not None:
self.init_from_style(obj.Properties())
| draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash)) | conditional_block |
linedlg.py | # Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
    else:
        dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
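    # Render each dash pattern into a shared 1-bit pixmap and register the
    # result as a named Tk bitmap image for use in the option menu.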
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash))
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def | (tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
self.var_width.set(style.line_width)
self.init_dash(style)
self.init_arrow(style)
else:
self.var_color_none.set(1)
def init_from_doc(self):
self.Update()
def Update(self):
if self.document.HasSelection():
properties = self.document.CurrentProperties()
self.init_from_style(properties)
def do_apply(self):
kw = {}
if not self.var_color_none.get():
color = self.color_but.Color()
kw["line_pattern"] = SolidPattern(color)
kw["line_width"] = self.var_width.get()
kw["line_join"] = self.opt_join.GetValue()
kw["line_cap"] = self.opt_cap.GetValue()
kw["line_dashes"] = self.opt_dash.GetValue()
kw["line_arrow1"] = self.opt_arrow1.GetValue()
kw["line_arrow2"] = self.opt_arrow2.GetValue()
else:
kw["line_pattern"] = EmptyPattern
self.set_properties(_("Set Outline"), 'line', kw)
def set_line_join(self, *args):
self.document.SetProperties(line_join = self.opt_join.GetValue(),
if_type_present = 1)
def set_line_cap(self, *args):
self.document.SetProperties(line_cap = self.opt_cap.GetValue(),
if_type_present = 1)
def set_line_color(self):
self.document.SetLineColor(self.color_but.Color())
def set_line_width(self, *rest):
self.document.SetProperties(line_width = self.var_width.get(),
if_type_present = 1)
def set_dash(self, *args):
self.document.SetProperties(line_dashes = self.opt_dash.GetValue(),
if_type_present = 1)
def init_dash(self, style):
dashes = style.line_dashes
draw_dash_bitmap(self.dash_gc, dashes)
dash_image = create_bitmap_image(self.top.tk, 'dash_image',
self.dash_bitmap)
self.opt_dash.SetValue(dashes, dash_image)
def set_arrow(self, arrow, which):
if which == 1:
self.document.SetProperties(line_arrow1 = arrow,
if_type_present = 1)
else:
self.document.SetProperties(line_arrow2 = arrow,
if_type_present = 1)
def init_arrow(self, style):
arrow = style.line_arrow1
draw_arrow_bitmap(self.arrow_gc, arrow, 1)
arrow_image = create_bitmap_image(self.top.tk, 'arrow1_image',
self.arrow_bitmap)
self.opt_arrow1.SetValue(arrow, arrow_image)
arrow = style.line_arrow2
draw_arrow_bitmap(self.arrow_gc, arrow, 2)
arrow_image = create_bitmap_image(self.top.tk, 'arrow2_image',
self.arrow_bitmap)
self.opt_arrow2.SetValue(arrow, arrow_image)
def update_from_object_cb(self, obj):
if obj is not None:
self.init_from_style(obj.Properties())
| create_arrow_images | identifier_name |
linedlg.py | # Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
    else:
        dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash))
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def create_arrow_images(tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
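        # Mirror the given style into the dialog's widgets; a style without
        # a line only checks the "None" box.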
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
self.var_width.set(style.line_width)
self.init_dash(style)
self.init_arrow(style)
else:
self.var_color_none.set(1)
def init_from_doc(self):
|
def Update(self):
if self.document.HasSelection():
properties = self.document.CurrentProperties()
self.init_from_style(properties)
def do_apply(self):
kw = {}
if not self.var_color_none.get():
color = self.color_but.Color()
kw["line_pattern"] = SolidPattern(color)
kw["line_width"] = self.var_width.get()
kw["line_join"] = self.opt_join.GetValue()
kw["line_cap"] = self.opt_cap.GetValue()
kw["line_dashes"] = self.opt_dash.GetValue()
kw["line_arrow1"] = self.opt_arrow1.GetValue()
kw["line_arrow2"] = self.opt_arrow2.GetValue()
else:
kw["line_pattern"] = EmptyPattern
self.set_properties(_("Set Outline"), 'line', kw)
def set_line_join(self, *args):
self.document.SetProperties(line_join = self.opt_join.GetValue(),
if_type_present = 1)
def set_line_cap(self, *args):
self.document.SetProperties(line_cap = self.opt_cap.GetValue(),
if_type_present = 1)
def set_line_color(self):
self.document.SetLineColor(self.color_but.Color())
def set_line_width(self, *rest):
self.document.SetProperties(line_width = self.var_width.get(),
if_type_present = 1)
def set_dash(self, *args):
self.document.SetProperties(line_dashes = self.opt_dash.GetValue(),
if_type_present = 1)
def init_dash(self, style):
dashes = style.line_dashes
draw_dash_bitmap(self.dash_gc, dashes)
dash_image = create_bitmap_image(self.top.tk, 'dash_image',
self.dash_bitmap)
self.opt_dash.SetValue(dashes, dash_image)
def set_arrow(self, arrow, which):
if which == 1:
self.document.SetProperties(line_arrow1 = arrow,
if_type_present = 1)
else:
self.document.SetProperties(line_arrow2 = arrow,
if_type_present = 1)
def init_arrow(self, style):
arrow = style.line_arrow1
draw_arrow_bitmap(self.arrow_gc, arrow, 1)
arrow_image = create_bitmap_image(self.top.tk, 'arrow1_image',
self.arrow_bitmap)
self.opt_arrow1.SetValue(arrow, arrow_image)
arrow = style.line_arrow2
draw_arrow_bitmap(self.arrow_gc, arrow, 2)
arrow_image = create_bitmap_image(self.top.tk, 'arrow2_image',
self.arrow_bitmap)
self.opt_arrow2.SetValue(arrow, arrow_image)
def update_from_object_cb(self, obj):
if obj is not None:
self.init_from_style(obj.Properties())
| self.Update() | identifier_body |
linedlg.py | # Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
    else:
        dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash))
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def create_arrow_images(tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
self.var_width.set(style.line_width)
self.init_dash(style)
self.init_arrow(style)
else:
self.var_color_none.set(1)
def init_from_doc(self):
self.Update()
def Update(self):
if self.document.HasSelection():
properties = self.document.CurrentProperties()
self.init_from_style(properties)
def do_apply(self):
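        # Gather the chosen line properties; checking "None" replaces the
        # outline with the empty pattern regardless of the other settings.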
kw = {}
if not self.var_color_none.get():
color = self.color_but.Color()
kw["line_pattern"] = SolidPattern(color)
kw["line_width"] = self.var_width.get()
kw["line_join"] = self.opt_join.GetValue()
kw["line_cap"] = self.opt_cap.GetValue()
kw["line_dashes"] = self.opt_dash.GetValue()
kw["line_arrow1"] = self.opt_arrow1.GetValue()
kw["line_arrow2"] = self.opt_arrow2.GetValue()
else:
kw["line_pattern"] = EmptyPattern
self.set_properties(_("Set Outline"), 'line', kw)
def set_line_join(self, *args):
self.document.SetProperties(line_join = self.opt_join.GetValue(),
if_type_present = 1)
def set_line_cap(self, *args):
self.document.SetProperties(line_cap = self.opt_cap.GetValue(),
if_type_present = 1)
def set_line_color(self):
self.document.SetLineColor(self.color_but.Color())
def set_line_width(self, *rest):
self.document.SetProperties(line_width = self.var_width.get(),
if_type_present = 1)
def set_dash(self, *args):
self.document.SetProperties(line_dashes = self.opt_dash.GetValue(),
if_type_present = 1)
def init_dash(self, style):
dashes = style.line_dashes
draw_dash_bitmap(self.dash_gc, dashes)
dash_image = create_bitmap_image(self.top.tk, 'dash_image',
self.dash_bitmap)
self.opt_dash.SetValue(dashes, dash_image)
def set_arrow(self, arrow, which):
if which == 1:
self.document.SetProperties(line_arrow1 = arrow,
if_type_present = 1)
else:
self.document.SetProperties(line_arrow2 = arrow,
if_type_present = 1)
def init_arrow(self, style):
arrow = style.line_arrow1
draw_arrow_bitmap(self.arrow_gc, arrow, 1)
arrow_image = create_bitmap_image(self.top.tk, 'arrow1_image',
self.arrow_bitmap)
self.opt_arrow1.SetValue(arrow, arrow_image)
arrow = style.line_arrow2
draw_arrow_bitmap(self.arrow_gc, arrow, 2)
arrow_image = create_bitmap_image(self.top.tk, 'arrow2_image',
self.arrow_bitmap)
self.opt_arrow2.SetValue(arrow, arrow_image)
def update_from_object_cb(self, obj):
if obj is not None: | self.init_from_style(obj.Properties()) | random_line_split |
|
main.js | ;
(function() {
var app = angular.module('dashboardApp', [
'ngRoute',
'dashboard'
]);
var dashboard = angular.module('dashboard', []);
dashboard.run(function($rootScope, invocationUtils, stringUtils, api, urls) {
$rootScope.invocationUtils = invocationUtils;
$rootScope.stringUtils = stringUtils;
$rootScope._api = api;
$rootScope._urls = urls;
});
// this is a basis for some perf improvements
// for things that only needs to bind, well, once.
app.directive('bindOnce', function () {
return {
scope: true,
link: function($scope, $element) {
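            // Destroying the child scope after the first digest removes its
            // watchers, so the rendered bindings are frozen in place.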
setTimeout(function () {
$scope.$destroy();
$element.removeClass('ng-binding ng-scope');
}, 0);
}
};
});
dashboard.factory('$exceptionHandler', function() {
return function(exception, cause) {
exception.message += ' (caused by "' + cause + '")';
console.log(["CATCH", exception, cause]);
throw exception;
};
});
app.config(['$routeProvider',
function ($routeProvider) {
var defaultHomePage = '/jobs'; //or /functions if not in Azure Web Sites
$routeProvider.
when('/', {
redirectTo: defaultHomePage
}).
when('/jobs', {
templateUrl: 'app/views/JobsList.html',
controller: 'JobsListController'
}).
when('/jobs/triggered/:jobName', {
templateUrl: 'app/views/TriggeredJob.html',
controller: 'TriggeredJobController'
}).
when('/jobs/continuous/:jobName', {
templateUrl: 'app/views/ContinuousJob.html',
controller: 'ContinuousJobController'
}).
when('/jobs/triggered/:jobName/runs/:runId', { | templateUrl: 'app/views/FunctionsHome.html',
controller: 'FunctionsHomeController'
}).
when('/functions/definitions/:functionId', {
templateUrl: 'app/views/Function.html',
controller: 'FunctionController'
}).
when('/functions/invocations/:invocationId', {
templateUrl: 'app/views/FunctionInvocation.html',
controller: 'FunctionInvocationController'
}).
when('/about', {
templateUrl: 'app/views/AboutHome.html',
controller: 'AboutController'
}).
when('/diagnostics/indexerLogEntry/:entryId', {
templateUrl: 'app/views/IndexerLogEntry.html',
controller: 'IndexerLogEntryController'
}).
otherwise({
redirectTo: '/'
});
}]);
// simple paging support
app.filter('startFrom', function() {
return function(input, start) {
start = +start; // ensure int
return input.slice(start);
};
});
app.run(function ($rootScope) {
// Initialize errors / warnings
$rootScope.errors = [];
$rootScope.warnings = [];
});
})(); | templateUrl: 'app/views/TriggeredJobRun.html',
controller: 'TriggeredJobRunController'
}).
when('/functions', { | random_line_split |
inline-closure.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z span_free_formats
// Tests that MIR inliner can handle closure arguments. (#45894)
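// The expected MIR below shows the closure body inlined into `foo`: the
// argument tuple is built and destructured in place instead of a call.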
fn main() {
println!("{}", foo(0, 14));
}
fn foo<T: Copy>(_t: T, q: i32) -> i32 |
// END RUST SOURCE
// START rustc.foo.Inline.after.mir
// ...
// bb0: {
// ...
// _3 = [closure@NodeId(39)];
// ...
// _4 = &_3;
// ...
// _6 = _2;
// ...
// _7 = _2;
// _5 = (move _6, move _7);
// _8 = move (_5.0: i32);
// _9 = move (_5.1: i32);
// _0 = _8;
// ...
// return;
// }
// ...
// END rustc.foo.Inline.after.mir
| {
let x = |_t, _q| _t;
x(q, q)
} | identifier_body |
inline-closure.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z span_free_formats
// Tests that MIR inliner can handle closure arguments. (#45894)
fn | () {
println!("{}", foo(0, 14));
}
fn foo<T: Copy>(_t: T, q: i32) -> i32 {
let x = |_t, _q| _t;
x(q, q)
}
// END RUST SOURCE
// START rustc.foo.Inline.after.mir
// ...
// bb0: {
// ...
// _3 = [closure@NodeId(39)];
// ...
// _4 = &_3;
// ...
// _6 = _2;
// ...
// _7 = _2;
// _5 = (move _6, move _7);
// _8 = move (_5.0: i32);
// _9 = move (_5.1: i32);
// _0 = _8;
// ...
// return;
// }
// ...
// END rustc.foo.Inline.after.mir
| main | identifier_name |
inline-closure.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z span_free_formats
// Tests that MIR inliner can handle closure arguments. (#45894)
fn main() {
println!("{}", foo(0, 14));
}
fn foo<T: Copy>(_t: T, q: i32) -> i32 {
let x = |_t, _q| _t; | x(q, q)
}
// END RUST SOURCE
// START rustc.foo.Inline.after.mir
// ...
// bb0: {
// ...
// _3 = [closure@NodeId(39)];
// ...
// _4 = &_3;
// ...
// _6 = _2;
// ...
// _7 = _2;
// _5 = (move _6, move _7);
// _8 = move (_5.0: i32);
// _9 = move (_5.1: i32);
// _0 = _8;
// ...
// return;
// }
// ...
// END rustc.foo.Inline.after.mir | random_line_split |
|
setup.py | #!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import codecs
import os
from setuptools import setup, find_packages
def | ():
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list
packages = find_packages(exclude=['tests*'])
with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],)
| requirements | identifier_name |
setup.py | #!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import codecs
import os
from setuptools import setup, find_packages
def requirements():
|
packages = find_packages(exclude=['tests*'])
with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],)
| """Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list | identifier_body |
setup.py | #!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import codecs
import os
from setuptools import setup, find_packages
def requirements():
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list
packages = find_packages(exclude=['tests*'])
| code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
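          # Optional extras: ujson speeds up JSON (de)serialization and
          # PySocks adds SOCKS proxy support.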
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],) | with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh: | random_line_split |
setup.py | #!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import codecs
import os
from setuptools import setup, find_packages
def requirements():
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
|
return requirements_list
packages = find_packages(exclude=['tests*'])
with codecs.open('README.rst', 'r', 'utf-8') as fd:
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
code = compile(fh.read(), fn, 'exec')
exec(code)
setup(name='python-telegram-bot',
version=__version__,
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
packages=packages,
install_requires=requirements(),
extras_require={
'json': 'ujson',
'socks': 'PySocks'
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],)
| requirements_list.append(install.strip()) | conditional_block |
lib.rs | #![crate_name="otp"]
#![crate_type="lib"]
use std::time::{SystemTime, SystemTimeError};
use std::convert::TryInto;
use data_encoding::{BASE32_NOPAD, DecodeError};
use err_derive::Error;
use ring::hmac;
#[derive(Debug, Error)]
pub enum Error {
#[error(display="invalid time provided")]
InvalidTimeError(#[error(source)] SystemTimeError),
#[error(display="invalid digest provided: {:?}", _0)]
InvalidDigest(Vec<u8>),
#[error(display="invalid secret provided")]
InvalidSecret(#[error(source)] DecodeError)
}
/// Decodes a secret (given as an RFC4648 base32-encoded ASCII string)
/// into a byte string
fn decode_secret(secret: &str) -> Result<Vec<u8>, DecodeError> {
BASE32_NOPAD.decode(secret.as_bytes())
}
/// Calculates the HMAC digest for the given secret and counter.
fn calc_digest(decoded_secret: &[u8], counter: u64) -> hmac::Tag {
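    // HOTP (RFC 4226) signs the counter, encoded as 8 big-endian bytes,
    // with HMAC-SHA1.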
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, decoded_secret);
hmac::sign(&key, &counter.to_be_bytes())
}
/// Encodes the HMAC digest into a 6-digit integer.
fn encode_digest(digest: &[u8]) -> Result<u32, Error> |
/// Performs the [HMAC-based One-time Password Algorithm](http://en.wikipedia.org/wiki/HMAC-based_One-time_Password_Algorithm)
/// (HOTP) given an RFC4648 base32 encoded secret, and an integer counter.
pub fn make_hotp(secret: &str, counter: u64) -> Result<u32, Error> {
let decoded = decode_secret(secret)?;
encode_digest(calc_digest(decoded.as_slice(), counter).as_ref())
}
/// Helper function for `make_totp` to make it testable. Note that times
/// before Unix epoch are not supported.
fn make_totp_helper(secret: &str, time_step: u64, skew: i64, time: u64) -> Result<u32, Error> {
let counter = ((time as i64 + skew) as u64) / time_step;
make_hotp(secret, counter)
}
/// Performs the [Time-based One-time Password Algorithm](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
/// (TOTP) given an RFC4648 base32 encoded secret, the time step in seconds,
/// and a skew in seconds.
pub fn make_totp(secret: &str, time_step: u64, skew: i64) -> Result<u32, Error> {
let now = SystemTime::now();
let time_since_epoch = now.duration_since(SystemTime::UNIX_EPOCH)?;
    make_totp_helper(secret, time_step, skew, time_since_epoch.as_secs())
}
#[cfg(test)]
mod tests {
use super::{make_hotp, make_totp_helper};
#[test]
fn hotp() {
assert_eq!(make_hotp("BASE32SECRET3232", 0).unwrap(), 260182);
assert_eq!(make_hotp("BASE32SECRET3232", 1).unwrap(), 55283);
assert_eq!(make_hotp("BASE32SECRET3232", 1401).unwrap(), 316439);
}
#[test]
fn totp() {
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 0).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 3600, 0, 7).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 35).unwrap(), 55283);
assert_eq!(make_totp_helper("BASE32SECRET3232", 1, -2, 1403).unwrap(), 316439);
}
}
| {
let offset = match digest.last() {
Some(x) => *x & 0xf,
None => return Err(Error::InvalidDigest(Vec::from(digest)))
} as usize;
let code_bytes: [u8; 4] = match digest[offset..offset+4].try_into() {
Ok(x) => x,
Err(_) => return Err(Error::InvalidDigest(Vec::from(digest)))
};
let code = u32::from_be_bytes(code_bytes);
Ok((code & 0x7fffffff) % 1_000_000)
} | identifier_body |
lib.rs | #![crate_name="otp"]
#![crate_type="lib"]
use std::time::{SystemTime, SystemTimeError};
use std::convert::TryInto;
use data_encoding::{BASE32_NOPAD, DecodeError};
use err_derive::Error;
use ring::hmac;
#[derive(Debug, Error)]
pub enum Error {
#[error(display="invalid time provided")]
InvalidTimeError(#[error(source)] SystemTimeError),
#[error(display="invalid digest provided: {:?}", _0)]
InvalidDigest(Vec<u8>),
#[error(display="invalid secret provided")]
InvalidSecret(#[error(source)] DecodeError)
}
/// Decodes a secret (given as an RFC4648 base32-encoded ASCII string)
/// into a byte string
fn decode_secret(secret: &str) -> Result<Vec<u8>, DecodeError> {
BASE32_NOPAD.decode(secret.as_bytes())
}
/// Calculates the HMAC digest for the given secret and counter.
fn calc_digest(decoded_secret: &[u8], counter: u64) -> hmac::Tag {
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, decoded_secret);
hmac::sign(&key, &counter.to_be_bytes())
}
/// Encodes the HMAC digest into a 6-digit integer.
fn encode_digest(digest: &[u8]) -> Result<u32, Error> {
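    // Dynamic truncation (RFC 4226, section 5.3): the low nibble of the
    // final HMAC byte selects an offset, four bytes there are read as a
    // big-endian u32, the sign bit is cleared, and the value is reduced
    // modulo 10^6 to yield six digits.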
let offset = match digest.last() {
Some(x) => *x & 0xf,
None => return Err(Error::InvalidDigest(Vec::from(digest)))
} as usize;
let code_bytes: [u8; 4] = match digest[offset..offset+4].try_into() {
Ok(x) => x,
Err(_) => return Err(Error::InvalidDigest(Vec::from(digest)))
};
let code = u32::from_be_bytes(code_bytes);
Ok((code & 0x7fffffff) % 1_000_000)
}
/// Performs the [HMAC-based One-time Password Algorithm](http://en.wikipedia.org/wiki/HMAC-based_One-time_Password_Algorithm)
/// (HOTP) given an RFC4648 base32 encoded secret, and an integer counter.
pub fn make_hotp(secret: &str, counter: u64) -> Result<u32, Error> {
let decoded = decode_secret(secret)?;
encode_digest(calc_digest(decoded.as_slice(), counter).as_ref())
}
/// Helper function for `make_totp` to make it testable. Note that times
/// before Unix epoch are not supported.
fn | (secret: &str, time_step: u64, skew: i64, time: u64) -> Result<u32, Error> {
let counter = ((time as i64 + skew) as u64) / time_step;
make_hotp(secret, counter)
}
/// Performs the [Time-based One-time Password Algorithm](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
/// (TOTP) given an RFC4648 base32 encoded secret, the time step in seconds,
/// and a skew in seconds.
pub fn make_totp(secret: &str, time_step: u64, skew: i64) -> Result<u32, Error> {
let now = SystemTime::now();
let time_since_epoch = now.duration_since(SystemTime::UNIX_EPOCH)?;
    make_totp_helper(secret, time_step, skew, time_since_epoch.as_secs())
}
#[cfg(test)]
mod tests {
use super::{make_hotp, make_totp_helper};
#[test]
fn hotp() {
assert_eq!(make_hotp("BASE32SECRET3232", 0).unwrap(), 260182);
assert_eq!(make_hotp("BASE32SECRET3232", 1).unwrap(), 55283);
assert_eq!(make_hotp("BASE32SECRET3232", 1401).unwrap(), 316439);
}
#[test]
fn totp() {
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 0).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 3600, 0, 7).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 35).unwrap(), 55283);
assert_eq!(make_totp_helper("BASE32SECRET3232", 1, -2, 1403).unwrap(), 316439);
}
}
| make_totp_helper | identifier_name |
lib.rs | #![crate_name="otp"]
#![crate_type="lib"]
use std::time::{SystemTime, SystemTimeError};
use std::convert::TryInto;
use data_encoding::{BASE32_NOPAD, DecodeError};
use err_derive::Error;
use ring::hmac;
#[derive(Debug, Error)]
pub enum Error {
#[error(display="invalid time provided")]
InvalidTimeError(#[error(source)] SystemTimeError),
#[error(display="invalid digest provided: {:?}", _0)]
InvalidDigest(Vec<u8>),
#[error(display="invalid secret provided")]
InvalidSecret(#[error(source)] DecodeError)
}
/// Decodes a secret (given as an RFC4648 base32-encoded ASCII string)
/// into a byte string
fn decode_secret(secret: &str) -> Result<Vec<u8>, DecodeError> {
BASE32_NOPAD.decode(secret.as_bytes())
}
/// Calculates the HMAC digest for the given secret and counter.
fn calc_digest(decoded_secret: &[u8], counter: u64) -> hmac::Tag {
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, decoded_secret);
hmac::sign(&key, &counter.to_be_bytes())
}
/// Encodes the HMAC digest into a 6-digit integer.
fn encode_digest(digest: &[u8]) -> Result<u32, Error> {
let offset = match digest.last() {
Some(x) => *x & 0xf,
None => return Err(Error::InvalidDigest(Vec::from(digest)))
} as usize;
let code_bytes: [u8; 4] = match digest[offset..offset+4].try_into() {
Ok(x) => x,
Err(_) => return Err(Error::InvalidDigest(Vec::from(digest)))
};
let code = u32::from_be_bytes(code_bytes);
Ok((code & 0x7fffffff) % 1_000_000)
}
/// Performs the [HMAC-based One-time Password Algorithm](http://en.wikipedia.org/wiki/HMAC-based_One-time_Password_Algorithm)
/// (HOTP) given an RFC4648 base32 encoded secret, and an integer counter.
pub fn make_hotp(secret: &str, counter: u64) -> Result<u32, Error> {
let decoded = decode_secret(secret)?;
encode_digest(calc_digest(decoded.as_slice(), counter).as_ref())
}
/// Helper function for `make_totp` to make it testable. Note that times
/// before Unix epoch are not supported.
fn make_totp_helper(secret: &str, time_step: u64, skew: i64, time: u64) -> Result<u32, Error> {
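    // TOTP (RFC 6238) is HOTP evaluated at counter = (time + skew) / time_step.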
let counter = ((time as i64 + skew) as u64) / time_step;
make_hotp(secret, counter)
}
/// Performs the [Time-based One-time Password Algorithm](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
/// (TOTP) given an RFC4648 base32 encoded secret, the time step in seconds,
/// and a skew in seconds.
pub fn make_totp(secret: &str, time_step: u64, skew: i64) -> Result<u32, Error> {
let now = SystemTime::now();
let time_since_epoch = now.duration_since(SystemTime::UNIX_EPOCH)?;
    make_totp_helper(secret, time_step, skew, time_since_epoch.as_secs())
}
#[cfg(test)]
mod tests {
use super::{make_hotp, make_totp_helper};
#[test]
fn hotp() {
assert_eq!(make_hotp("BASE32SECRET3232", 0).unwrap(), 260182);
assert_eq!(make_hotp("BASE32SECRET3232", 1).unwrap(), 55283);
assert_eq!(make_hotp("BASE32SECRET3232", 1401).unwrap(), 316439);
}
#[test]
fn totp() {
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 0).unwrap(), 260182); | assert_eq!(make_totp_helper("BASE32SECRET3232", 1, -2, 1403).unwrap(), 316439);
}
} | assert_eq!(make_totp_helper("BASE32SECRET3232", 3600, 0, 7).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 35).unwrap(), 55283); | random_line_split |
GenericMeetingPanel.tsx | import sleep from 'ringcentral-integration/lib/sleep';
import React, { useState } from 'react';
import SpinnerOverlay from '../SpinnerOverlay';
import MeetingConfigs from '../MeetingConfigs';
import isSafari from '../../lib/isSafari';
import { VideoConfig, Topic } from '../VideoPanel/VideoConfig';
import { GenericMeetingPanelProps } from './interface';
import styles from './styles.scss';
const GenericMeetingPanel: React.ComponentType<GenericMeetingPanelProps> = (
props,
) => {
const { showCustom, CustomPanel } = props;
if (showCustom) {
return CustomPanel as JSX.Element;
}
const {
meeting,
disabled,
currentLocale,
scheduleButton: ScheduleButton,
recipientsSection,
showWhen,
showDuration,
showRecurringMeeting,
openNewWindow,
meetingOptionToggle,
passwordPlaceholderEnable,
audioOptionToggle,
onOK,
init,
showSaveAsDefault,
updateMeetingSettings,
validatePasswordSettings,
isRCM,
isRCV,
datePickerSize,
timePickerSize,
showLaunchMeetingBtn,
launchMeeting,
scheduleButtonLabel,
appCode,
schedule,
brandName,
personalMeetingId,
} = props;
if (!isRCM && !isRCV) {
return <SpinnerOverlay />;
}
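  // Calling useState after the conditional returns above means hook order
  // can differ between renders, which is what the lint rule guards against.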
// TODO: fix lint issue here
// eslint-disable-next-line react-hooks/rules-of-hooks
const [topicRef, setTopicRef] = useState(null);
return (
<div className={styles.wrapper}>
{isRCM && (
<MeetingConfigs
update={updateMeetingSettings}
init={init}
meeting={meeting}
disabled={disabled}
currentLocale={currentLocale}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
showRecurringMeeting={showRecurringMeeting}
openNewWindow={openNewWindow}
meetingOptionToggle={meetingOptionToggle}
passwordPlaceholderEnable={passwordPlaceholderEnable}
audioOptionToggle={audioOptionToggle}
/>
)}
{isRCV && (
<VideoConfig
currentLocale={currentLocale}
meeting={meeting}
updateMeetingSettings={updateMeetingSettings}
validatePasswordSettings={validatePasswordSettings}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
init={init}
datePickerSize={datePickerSize}
timePickerSize={timePickerSize}
brandName={brandName}
personalMeetingId={personalMeetingId}
>
<Topic
name={meeting.name}
updateMeetingTopic={(name) => {
updateMeetingSettings({ name });
}}
currentLocale={currentLocale}
setTopicRef={setTopicRef}
/>
</VideoConfig>
)}
{(isRCM || isRCV) && ScheduleButton && (
<ScheduleButton
currentLocale={currentLocale}
disabled={disabled}
meeting={meeting}
onOK={onOK}
onClick={async () => {
if (!disabled) {
await sleep(100);
const opener = openNewWindow && isSafari() ? window.open() : null;
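// Safari tends to block window.open() calls made outside the synchronous
// call stack of a user gesture, so when openNewWindow is set a blank
// window is opened eagerly here and handed to schedule() to navigate;
// otherwise opener stays null and schedule() presumably opens its own.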
const meetingSetting = isRCM
? meeting
: {
...meeting,
name: topicRef.current.props.value,
};
await schedule(meetingSetting, opener);
}
}}
update={updateMeetingSettings}
showSaveAsDefault={showSaveAsDefault}
launchMeeting={launchMeeting}
showLaunchMeetingBtn={showLaunchMeetingBtn}
appCode={appCode}
scheduleButtonLabel={scheduleButtonLabel}
/>
)}
</div>
);
};
GenericMeetingPanel.defaultProps = {
launchMeeting() | ,
disabled: false,
showWhen: true,
showDuration: true,
showRecurringMeeting: true,
openNewWindow: true,
meetingOptionToggle: false,
passwordPlaceholderEnable: false,
audioOptionToggle: false,
onOK: undefined,
scheduleButton: undefined,
showSaveAsDefault: true,
showCustom: false,
showLaunchMeetingBtn: false,
appCode: '',
scheduleButtonLabel: '',
personalMeetingId: undefined,
};
export { GenericMeetingPanel };
| {} | identifier_body |
GenericMeetingPanel.tsx | import sleep from 'ringcentral-integration/lib/sleep';
import React, { useState } from 'react';
import SpinnerOverlay from '../SpinnerOverlay';
import MeetingConfigs from '../MeetingConfigs';
import isSafari from '../../lib/isSafari';
import { VideoConfig, Topic } from '../VideoPanel/VideoConfig';
import { GenericMeetingPanelProps } from './interface';
import styles from './styles.scss';
const GenericMeetingPanel: React.ComponentType<GenericMeetingPanelProps> = (
props,
) => {
const { showCustom, CustomPanel } = props;
if (showCustom) |
const {
meeting,
disabled,
currentLocale,
scheduleButton: ScheduleButton,
recipientsSection,
showWhen,
showDuration,
showRecurringMeeting,
openNewWindow,
meetingOptionToggle,
passwordPlaceholderEnable,
audioOptionToggle,
onOK,
init,
showSaveAsDefault,
updateMeetingSettings,
validatePasswordSettings,
isRCM,
isRCV,
datePickerSize,
timePickerSize,
showLaunchMeetingBtn,
launchMeeting,
scheduleButtonLabel,
appCode,
schedule,
brandName,
personalMeetingId,
} = props;
if (!isRCM && !isRCV) {
return <SpinnerOverlay />;
}
// TODO: fix lint issue here
// eslint-disable-next-line react-hooks/rules-of-hooks
const [topicRef, setTopicRef] = useState(null);
return (
<div className={styles.wrapper}>
{isRCM && (
<MeetingConfigs
update={updateMeetingSettings}
init={init}
meeting={meeting}
disabled={disabled}
currentLocale={currentLocale}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
showRecurringMeeting={showRecurringMeeting}
openNewWindow={openNewWindow}
meetingOptionToggle={meetingOptionToggle}
passwordPlaceholderEnable={passwordPlaceholderEnable}
audioOptionToggle={audioOptionToggle}
/>
)}
{isRCV && (
<VideoConfig
currentLocale={currentLocale}
meeting={meeting}
updateMeetingSettings={updateMeetingSettings}
validatePasswordSettings={validatePasswordSettings}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
init={init}
datePickerSize={datePickerSize}
timePickerSize={timePickerSize}
brandName={brandName}
personalMeetingId={personalMeetingId}
>
<Topic
name={meeting.name}
updateMeetingTopic={(name) => {
updateMeetingSettings({ name });
}}
currentLocale={currentLocale}
setTopicRef={setTopicRef}
/>
</VideoConfig>
)}
{(isRCM || isRCV) && ScheduleButton && (
<ScheduleButton
currentLocale={currentLocale}
disabled={disabled}
meeting={meeting}
onOK={onOK}
onClick={async () => {
if (!disabled) {
await sleep(100);
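// The 100 ms pause is presumably here to let pending blur/state updates
// from the form flush before the settings are read and submitted below.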
const opener = openNewWindow && isSafari() ? window.open() : null;
const meetingSetting = isRCM
? meeting
: {
...meeting,
name: topicRef.current.props.value,
};
await schedule(meetingSetting, opener);
}
}}
update={updateMeetingSettings}
showSaveAsDefault={showSaveAsDefault}
launchMeeting={launchMeeting}
showLaunchMeetingBtn={showLaunchMeetingBtn}
appCode={appCode}
scheduleButtonLabel={scheduleButtonLabel}
/>
)}
</div>
);
};
GenericMeetingPanel.defaultProps = {
launchMeeting() {},
disabled: false,
showWhen: true,
showDuration: true,
showRecurringMeeting: true,
openNewWindow: true,
meetingOptionToggle: false,
passwordPlaceholderEnable: false,
audioOptionToggle: false,
onOK: undefined,
scheduleButton: undefined,
showSaveAsDefault: true,
showCustom: false,
showLaunchMeetingBtn: false,
appCode: '',
scheduleButtonLabel: '',
personalMeetingId: undefined,
};
export { GenericMeetingPanel };
| {
return CustomPanel as JSX.Element;
} | conditional_block |
GenericMeetingPanel.tsx | import sleep from 'ringcentral-integration/lib/sleep';
import React, { useState } from 'react';
import SpinnerOverlay from '../SpinnerOverlay';
import MeetingConfigs from '../MeetingConfigs';
import isSafari from '../../lib/isSafari';
import { VideoConfig, Topic } from '../VideoPanel/VideoConfig';
import { GenericMeetingPanelProps } from './interface';
import styles from './styles.scss';
const GenericMeetingPanel: React.ComponentType<GenericMeetingPanelProps> = (
props,
) => {
const { showCustom, CustomPanel } = props;
if (showCustom) {
return CustomPanel as JSX.Element;
}
const {
meeting,
disabled,
currentLocale,
scheduleButton: ScheduleButton,
recipientsSection,
showWhen,
showDuration,
showRecurringMeeting,
openNewWindow,
meetingOptionToggle,
passwordPlaceholderEnable,
audioOptionToggle,
onOK,
init,
showSaveAsDefault,
updateMeetingSettings, | timePickerSize,
showLaunchMeetingBtn,
launchMeeting,
scheduleButtonLabel,
appCode,
schedule,
brandName,
personalMeetingId,
} = props;
if (!isRCM && !isRCV) {
return <SpinnerOverlay />;
}
// TODO: fix lint issue here
// eslint-disable-next-line react-hooks/rules-of-hooks
const [topicRef, setTopicRef] = useState(null);
return (
<div className={styles.wrapper}>
{isRCM && (
<MeetingConfigs
update={updateMeetingSettings}
init={init}
meeting={meeting}
disabled={disabled}
currentLocale={currentLocale}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
showRecurringMeeting={showRecurringMeeting}
openNewWindow={openNewWindow}
meetingOptionToggle={meetingOptionToggle}
passwordPlaceholderEnable={passwordPlaceholderEnable}
audioOptionToggle={audioOptionToggle}
/>
)}
{isRCV && (
<VideoConfig
currentLocale={currentLocale}
meeting={meeting}
updateMeetingSettings={updateMeetingSettings}
validatePasswordSettings={validatePasswordSettings}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
init={init}
datePickerSize={datePickerSize}
timePickerSize={timePickerSize}
brandName={brandName}
personalMeetingId={personalMeetingId}
>
<Topic
name={meeting.name}
updateMeetingTopic={(name) => {
updateMeetingSettings({ name });
}}
currentLocale={currentLocale}
setTopicRef={setTopicRef}
/>
</VideoConfig>
)}
{(isRCM || isRCV) && ScheduleButton && (
<ScheduleButton
currentLocale={currentLocale}
disabled={disabled}
meeting={meeting}
onOK={onOK}
onClick={async () => {
if (!disabled) {
await sleep(100);
const opener = openNewWindow && isSafari() ? window.open() : null;
const meetingSetting = isRCM
? meeting
: {
...meeting,
name: topicRef.current.props.value,
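// For RCV the draft topic lives in the Topic child (captured via
// setTopicRef above), so it is read back from the ref at submit time
// rather than from the meeting object itself.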
};
await schedule(meetingSetting, opener);
}
}}
update={updateMeetingSettings}
showSaveAsDefault={showSaveAsDefault}
launchMeeting={launchMeeting}
showLaunchMeetingBtn={showLaunchMeetingBtn}
appCode={appCode}
scheduleButtonLabel={scheduleButtonLabel}
/>
)}
</div>
);
};
GenericMeetingPanel.defaultProps = {
launchMeeting() {},
disabled: false,
showWhen: true,
showDuration: true,
showRecurringMeeting: true,
openNewWindow: true,
meetingOptionToggle: false,
passwordPlaceholderEnable: false,
audioOptionToggle: false,
onOK: undefined,
scheduleButton: undefined,
showSaveAsDefault: true,
showCustom: false,
showLaunchMeetingBtn: false,
appCode: '',
scheduleButtonLabel: '',
personalMeetingId: undefined,
};
export { GenericMeetingPanel }; | validatePasswordSettings,
isRCM,
isRCV,
datePickerSize, | random_line_split |
GenericMeetingPanel.tsx | import sleep from 'ringcentral-integration/lib/sleep';
import React, { useState } from 'react';
import SpinnerOverlay from '../SpinnerOverlay';
import MeetingConfigs from '../MeetingConfigs';
import isSafari from '../../lib/isSafari';
import { VideoConfig, Topic } from '../VideoPanel/VideoConfig';
import { GenericMeetingPanelProps } from './interface';
import styles from './styles.scss';
const GenericMeetingPanel: React.ComponentType<GenericMeetingPanelProps> = (
props,
) => {
const { showCustom, CustomPanel } = props;
if (showCustom) {
return CustomPanel as JSX.Element;
}
const {
meeting,
disabled,
currentLocale,
scheduleButton: ScheduleButton,
recipientsSection,
showWhen,
showDuration,
showRecurringMeeting,
openNewWindow,
meetingOptionToggle,
passwordPlaceholderEnable,
audioOptionToggle,
onOK,
init,
showSaveAsDefault,
updateMeetingSettings,
validatePasswordSettings,
isRCM,
isRCV,
datePickerSize,
timePickerSize,
showLaunchMeetingBtn,
launchMeeting,
scheduleButtonLabel,
appCode,
schedule,
brandName,
personalMeetingId,
} = props;
if (!isRCM && !isRCV) {
return <SpinnerOverlay />;
}
// TODO: fix lint issue here
// eslint-disable-next-line react-hooks/rules-of-hooks
const [topicRef, setTopicRef] = useState(null);
return (
<div className={styles.wrapper}>
{isRCM && (
<MeetingConfigs
update={updateMeetingSettings}
init={init}
meeting={meeting}
disabled={disabled}
currentLocale={currentLocale}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
showRecurringMeeting={showRecurringMeeting}
openNewWindow={openNewWindow}
meetingOptionToggle={meetingOptionToggle}
passwordPlaceholderEnable={passwordPlaceholderEnable}
audioOptionToggle={audioOptionToggle}
/>
)}
{isRCV && (
<VideoConfig
currentLocale={currentLocale}
meeting={meeting}
updateMeetingSettings={updateMeetingSettings}
validatePasswordSettings={validatePasswordSettings}
recipientsSection={recipientsSection}
showWhen={showWhen}
showDuration={showDuration}
init={init}
datePickerSize={datePickerSize}
timePickerSize={timePickerSize}
brandName={brandName}
personalMeetingId={personalMeetingId}
>
<Topic
name={meeting.name}
updateMeetingTopic={(name) => {
updateMeetingSettings({ name });
}}
currentLocale={currentLocale}
setTopicRef={setTopicRef}
/>
</VideoConfig>
)}
{(isRCM || isRCV) && ScheduleButton && (
<ScheduleButton
currentLocale={currentLocale}
disabled={disabled}
meeting={meeting}
onOK={onOK}
onClick={async () => {
if (!disabled) {
await sleep(100);
const opener = openNewWindow && isSafari() ? window.open() : null;
const meetingSetting = isRCM
? meeting
: {
...meeting,
name: topicRef.current.props.value,
};
await schedule(meetingSetting, opener);
}
}}
update={updateMeetingSettings}
showSaveAsDefault={showSaveAsDefault}
launchMeeting={launchMeeting}
showLaunchMeetingBtn={showLaunchMeetingBtn}
appCode={appCode}
scheduleButtonLabel={scheduleButtonLabel}
/>
)}
</div>
);
};
GenericMeetingPanel.defaultProps = {
| () {},
disabled: false,
showWhen: true,
showDuration: true,
showRecurringMeeting: true,
openNewWindow: true,
meetingOptionToggle: false,
passwordPlaceholderEnable: false,
audioOptionToggle: false,
onOK: undefined,
scheduleButton: undefined,
showSaveAsDefault: true,
showCustom: false,
showLaunchMeetingBtn: false,
appCode: '',
scheduleButtonLabel: '',
personalMeetingId: undefined,
};
export { GenericMeetingPanel };
| launchMeeting | identifier_name |
account_tax_registry.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Agile Business Group
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
| class AccountTaxRegistry(models.Model):
_name = 'account.tax.registry'
name = fields.Char('Name', required=True)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'account.tax.registry'))
journal_ids = fields.One2many(
'account.journal', 'tax_registry_id', 'Journals', readonly=True)
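# Assumes the inverse Many2one field 'tax_registry_id' is added to
# account.journal elsewhere in this module; this One2many merely mirrors it.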
type = fields.Selection([
('customer', 'Customer Invoices'),
('supplier', 'Supplier Invoices'),
('corrispettivi', 'Corrispettivi'),
], 'Layout', required=True) | from openerp import models, fields
| random_line_split |
account_tax_registry.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Agile Business Group
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields
class AccountTaxRegistry(models.Model):
| _name = 'account.tax.registry'
name = fields.Char('Name', required=True)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'account.tax.registry'))
journal_ids = fields.One2many(
'account.journal', 'tax_registry_id', 'Journals', readonly=True)
type = fields.Selection([
('customer', 'Customer Invoices'),
('supplier', 'Supplier Invoices'),
('corrispettivi', 'Corrispettivi'),
], 'Layout', required=True) | identifier_body |
|
account_tax_registry.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Agile Business Group
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields
class | (models.Model):
_name = 'account.tax.registry'
name = fields.Char('Name', required=True)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'account.tax.registry'))
journal_ids = fields.One2many(
'account.journal', 'tax_registry_id', 'Journals', readonly=True)
type = fields.Selection([
('customer', 'Customer Invoices'),
('supplier', 'Supplier Invoices'),
('corrispettivi', 'Corrispettivi'),
], 'Layout', required=True)
| AccountTaxRegistry | identifier_name |
models.py | from django.db import models
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse_lazy, reverse
from django.conf import settings
from projects.models import Project, Customer
class Report (models.Model):
HIGHLIGHT = 'HL'
LOWLIGHT = 'LL'
ESCALATION = 'XS'
LIGHTS = (
(HIGHLIGHT, _('Highlight')),
(LOWLIGHT, _('Lowlight')),
(ESCALATION,_('Escalation')),
)
year = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("year"))
period = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("period"))
light = models.CharField (max_length=2, choices=LIGHTS, default=HIGHLIGHT)
description = models.TextField (null=False, blank=True, verbose_name=_("description"))
created = models.DateTimeField (auto_now_add=True)
created_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
modified = models.DateTimeField (auto_now=True)
modified_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
class Meta:
verbose_name = _('Report')
verbose_name_plural = _('Reports')
ordering = ['light']
unique_together = ("target", "year", "period")
abstract = True
def __str__(self):
return self.title
# def get_absolute_url (self):
# return reverse('reporting-detail', args=[str(self.id)])
class ProjectReport (Report):
target = models.ForeignKey (Project, related_name='reports')
class Meta:
|
class CustomerReport (Report):
target = models.ForeignKey (Customer, related_name='reports')
class Meta:
verbose_name = _('Customer report')
verbose_name_plural = _('Customer reports')
| verbose_name = _('Project report')
verbose_name_plural = _('Project reports') | identifier_body |
models.py | from django.db import models
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse_lazy, reverse
from django.conf import settings
from projects.models import Project, Customer
class Report (models.Model):
HIGHLIGHT = 'HL'
LOWLIGHT = 'LL'
ESCALATION = 'XS'
LIGHTS = (
(HIGHLIGHT, _('Highlight')),
(LOWLIGHT, _('Lowlight')),
(ESCALATION,_('Escalation')),
)
year = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("year"))
period = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("period"))
light = models.CharField (max_length=2, choices=LIGHTS, default=HIGHLIGHT)
description = models.TextField (null=False, blank=True, verbose_name=_("description"))
created = models.DateTimeField (auto_now_add=True)
created_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
modified = models.DateTimeField (auto_now=True)
modified_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
class Meta:
verbose_name = _('Report')
verbose_name_plural = _('Reports')
ordering = ['light']
unique_together = ("target", "year", "period")
abstract = True
def | (self):
return self.title
# def get_absolute_url (self):
# return reverse('reporting-detail', args=[str(self.id)])
class ProjectReport (Report):
target = models.ForeignKey (Project, related_name='reports')
class Meta:
verbose_name = _('Project report')
verbose_name_plural = _('Project reports')
class CustomerReport (Report):
target = models.ForeignKey (Customer, related_name='reports')
class Meta:
verbose_name = _('Customer report')
verbose_name_plural = _('Customer reports')
| __str__ | identifier_name |
models.py | from django.db import models
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse_lazy, reverse
from django.conf import settings
from projects.models import Project, Customer
class Report (models.Model):
HIGHLIGHT = 'HL'
LOWLIGHT = 'LL'
ESCALATION = 'XS'
LIGHTS = (
(HIGHLIGHT, _('Highlight')),
(LOWLIGHT, _('Lowlight')),
(ESCALATION,_('Escalation')),
)
year = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("year"))
period = models.PositiveIntegerField (null=False, blank=False,
verbose_name=_("period"))
light = models.CharField (max_length=2, choices=LIGHTS, default=HIGHLIGHT)
description = models.TextField (null=False, blank=True, verbose_name=_("description"))
created = models.DateTimeField (auto_now_add=True)
created_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
modified = models.DateTimeField (auto_now=True)
modified_by = models.ForeignKey (settings.AUTH_USER_MODEL, related_name='+', editable=False)
class Meta:
verbose_name = _('Report')
verbose_name_plural = _('Reports')
ordering = ['light']
unique_together = ("target", "year", "period")
abstract = True
def __str__(self):
return self.title | return mark_safe("<b>%s</b>: %s" % (self.project, self.title))
# def get_absolute_url (self):
# return reverse('reporting-detail', args=[str(self.id)])
class ProjectReport (Report):
target = models.ForeignKey (Project, related_name='reports')
class Meta:
verbose_name = _('Project report')
verbose_name_plural = _('Project reports')
class CustomerReport (Report):
target = models.ForeignKey (Customer, related_name='reports')
class Meta:
verbose_name = _('Customer report')
verbose_name_plural = _('Customer reports') | #return str(self.project) + (self.title if len(self.title) < 30 else (self.title[:27]+'...'))
| random_line_split |
allocation.py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
"""Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
"""
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. try not to allocate the
# space they freed too soon (they will likely need grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
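# A small worked example of the bookkeeping below (values arbitrary); the
# allocator records holes, the caller keeps track of its own regions:
#
# a = Allocator(16)
# a.alloc(4) # -> 0; starts=[0], sizes=[4]
# a.alloc(8) # -> 4; the existing block simply grows: sizes=[12]
# a.dealloc(4, 8) # truncate the block back: starts=[0], sizes=[4]
# a.alloc(2) # -> 4; the block grows into the freed space again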
class AllocatorMemoryException(Exception):
"""The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when the operation failed due to lack of
buffer space. The buffer should be increased to at least
requested_capacity and then the operation retried (guaranteed to
pass second time).
"""
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
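# Caller-side retry sketch matching the module docstring; resize_buffer is
# a hypothetical stand-in for whatever storage backs the allocator:
#
# try:
# start = allocator.alloc(n)
# except AllocatorMemoryException as exc:
# new_cap = max(exc.requested_capacity, allocator.capacity * 2)
# resize_buffer(new_cap) # hypothetical helper
# allocator.set_capacity(new_cap)
# start = allocator.alloc(n) # guaranteed to succeed this time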
class Allocator:
"""Buffer space allocation implementation."""
def __init__(self, capacity):
"""Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
"""
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
self.starts = list()
self.sizes = list()
def set_capacity(self, size):
"""Resize the maximum buffer size.
The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
"""
assert size > self.capacity
self.capacity = size
def alloc(self, size):
"""Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
"""
assert size >= 0
if size == 0:
return 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i + 1]
del self.sizes[i + 1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
def realloc(self, start, size, new_size):
"""Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
"""
assert size >= 0 and new_size >= 0
if new_size == 0:
if size != 0:
self.dealloc(start, size)
return 0
elif size == 0:
return self.alloc(new_size)
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
# Find which block it lives in
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
if not (p >= 0 and size <= alloc_size - p):
print(list(zip(self.starts, self.sizes)))
print(start, size, new_size)
print(p, alloc_start, alloc_size)
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if size == alloc_size - p:
# Region is at end of block. Find how much free space is after
# it.
is_final_block = i == len(self.starts) - 1
if not is_final_block:
free_size = self.starts[i + 1] - (start + size)
else:
free_size = self.capacity - (start + size)
# TODO If region is an entire block being an island in free space,
# can possibly extend in both directions.
if free_size == new_size - size and not is_final_block:
# Merge block with next (region is expanded in place to
# exactly fill the free space)
self.sizes[i] += free_size + self.sizes[i + 1]
del self.starts[i + 1]
del self.sizes[i + 1]
return start
elif free_size > new_size - size:
# Expand region in place
self.sizes[i] += new_size - size
return start
# The block must be repositioned. Dealloc then alloc.
# But don't do this! If alloc fails, we've already silently dealloc'd
# the original block.
# self.dealloc(start, size)
# return self.alloc(new_size)
# It must be alloc'd first. We're not missing an optimisation
# here, because if freeing the block would've allowed for the block to
# be placed in the resulting free space, one of the above in-place
# checks would've found it.
result = self.alloc(new_size) | self.dealloc(start, size)
return result
def dealloc(self, start, size):
"""Free a region of the buffer.
:Parameters:
`start` : int
Starting index of the region.
`size` : int
Size of the region.
"""
assert size >= 0
if size == 0:
return
assert self.starts
# Find which block needs to be split
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
# Assert we left via the break
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if p == 0 and size == alloc_size:
# Remove entire block
del self.starts[i]
del self.sizes[i]
elif p == 0:
# Truncate beginning of block
self.starts[i] += size
self.sizes[i] -= size
elif size == alloc_size - p:
# Truncate end of block
self.sizes[i] -= size
else:
# Reduce size of left side, insert block at right side
# $ = dealloc'd block, # = alloc'd region from same block
#
# <------8------>
# <-5-><-6-><-7->
# 1 2 3 4
# #####$$$$$#####
#
# 1 = alloc_start
# 2 = start
# 3 = start + size
# 4 = alloc_start + alloc_size
# 5 = start - alloc_start = p
# 6 = size
# 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
# 8 = alloc_size
#
self.sizes[i] = p
self.starts.insert(i + 1, start + size)
self.sizes.insert(i + 1, alloc_size - (p + size))
def get_allocated_regions(self):
"""Get a list of (aggregate) allocated regions.
The result of this method is ``(starts, sizes)``, where ``starts`` is
a list of starting indices of the regions and ``sizes`` their
corresponding lengths.
:rtype: (list, list)
"""
# return (starts, sizes); len(starts) == len(sizes)
return (self.starts, self.sizes)
def get_fragmented_free_size(self):
"""Returns the amount of space unused, not including the final
free block.
:rtype: int
"""
if not self.starts:
return 0
# Variation of search for free block.
total_free = 0
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
total_free += alloc_start - free_start
free_start = alloc_start + alloc_size
return total_free
def get_free_size(self):
"""Return the amount of space unused.
:rtype: int
"""
if not self.starts:
return self.capacity
free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
return self.get_fragmented_free_size() + free_end
def get_usage(self):
"""Return fraction of capacity currently allocated.
:rtype: float
"""
return 1. - self.get_free_size() / float(self.capacity)
def get_fragmentation(self):
"""Return fraction of free space that is not expandable.
:rtype: float
"""
free_size = self.get_free_size()
if free_size == 0:
return 0.
return self.get_fragmented_free_size() / float(self.get_free_size())
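# Using the diagram from __init__ (capacity 40, starts=[0, 5, 20],
# sizes=[3, 10, 4]): fragmented free = (5-3) + (20-15) = 7, total free =
# 7 + (40 - 24) = 23, so get_usage() = 1 - 23/40 = 0.425 and
# get_fragmentation() = 7/23, roughly 0.30.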
def _is_empty(self):
return not self.starts
def __str__(self):
return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self)) | random_line_split |
|
allocation.py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
"""Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
"""
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. try not to allocate the
# space they freed too soon (they will likely need grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
class AllocatorMemoryException(Exception):
"""The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when the operation failed due to lack of
buffer space. The buffer should be increased to at least
requested_capacity and then the operation retried (guaranteed to
pass second time).
"""
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
class Allocator:
"""Buffer space allocation implementation."""
def __init__(self, capacity):
"""Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
"""
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
self.starts = list()
self.sizes = list()
def set_capacity(self, size):
"""Resize the maximum buffer size.
The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
"""
assert size > self.capacity
self.capacity = size
def alloc(self, size):
"""Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
"""
assert size >= 0
if size == 0:
return 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i + 1]
del self.sizes[i + 1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
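# The requested capacity reported here is exactly what would let this call
# succeed: the current capacity plus the final free run's shortfall
# (size - free_size).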
def realloc(self, start, size, new_size):
"""Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
"""
assert size >= 0 and new_size >= 0
if new_size == 0:
if size != 0:
self.dealloc(start, size)
return 0
elif size == 0:
return self.alloc(new_size)
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
# Find which block it lives in
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
if not (p >= 0 and size <= alloc_size - p):
print(list(zip(self.starts, self.sizes)))
print(start, size, new_size)
print(p, alloc_start, alloc_size)
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if size == alloc_size - p:
# Region is at end of block. Find how much free space is after
# it.
is_final_block = i == len(self.starts) - 1
if not is_final_block:
free_size = self.starts[i + 1] - (start + size)
else:
free_size = self.capacity - (start + size)
# TODO If region is an entire block being an island in free space,
# can possibly extend in both directions.
if free_size == new_size - size and not is_final_block:
# Merge block with next (region is expanded in place to
# exactly fill the free space)
self.sizes[i] += free_size + self.sizes[i + 1]
del self.starts[i + 1]
del self.sizes[i + 1]
return start
elif free_size > new_size - size:
# Expand region in place
self.sizes[i] += new_size - size
return start
# The block must be repositioned. Dealloc then alloc.
# But don't do this! If alloc fails, we've already silently dealloc'd
# the original block.
# self.dealloc(start, size)
# return self.alloc(new_size)
# It must be alloc'd first. We're not missing an optimisation
# here, because if freeing the block would've allowed for the block to
# be placed in the resulting free space, one of the above in-place
# checks would've found it.
result = self.alloc(new_size)
self.dealloc(start, size)
return result
def dealloc(self, start, size):
"""Free a region of the buffer.
:Parameters:
`start` : int
Starting index of the region.
`size` : int
Size of the region.
"""
assert size >= 0
if size == 0:
return
assert self.starts
# Find which block needs to be split
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
# Assert we left via the break
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if p == 0 and size == alloc_size:
# Remove entire block
del self.starts[i]
del self.sizes[i]
elif p == 0:
# Truncate beginning of block
self.starts[i] += size
self.sizes[i] -= size
elif size == alloc_size - p:
# Truncate end of block
self.sizes[i] -= size
else:
# Reduce size of left side, insert block at right side
# $ = dealloc'd block, # = alloc'd region from same block
#
# <------8------>
# <-5-><-6-><-7->
# 1 2 3 4
# #####$$$$$#####
#
# 1 = alloc_start
# 2 = start
# 3 = start + size
# 4 = alloc_start + alloc_size
# 5 = start - alloc_start = p
# 6 = size
# 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
# 8 = alloc_size
#
self.sizes[i] = p
self.starts.insert(i + 1, start + size)
self.sizes.insert(i + 1, alloc_size - (p + size))
def get_allocated_regions(self):
"""Get a list of (aggregate) allocated regions.
The result of this method is ``(starts, sizes)``, where ``starts`` is
a list of starting indices of the regions and ``sizes`` their
corresponding lengths.
:rtype: (list, list)
"""
# return (starts, sizes); len(starts) == len(sizes)
return (self.starts, self.sizes)
def get_fragmented_free_size(self):
"""Returns the amount of space unused, not including the final
free block.
:rtype: int
"""
if not self.starts:
return 0
# Variation of search for free block.
total_free = 0
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
total_free += alloc_start - free_start
free_start = alloc_start + alloc_size
return total_free
def get_free_size(self):
"""Return the amount of space unused.
:rtype: int
"""
if not self.starts:
|
free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
return self.get_fragmented_free_size() + free_end
def get_usage(self):
"""Return fraction of capacity currently allocated.
:rtype: float
"""
return 1. - self.get_free_size() / float(self.capacity)
def get_fragmentation(self):
"""Return fraction of free space that is not expandable.
:rtype: float
"""
free_size = self.get_free_size()
if free_size == 0:
return 0.
return self.get_fragmented_free_size() / float(self.get_free_size())
def _is_empty(self):
return not self.starts
def __str__(self):
return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self))
| return self.capacity | conditional_block |
allocation.py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
"""Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
"""
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. try not to allocate the
# space they freed too soon (they will likely need grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
class | (Exception):
"""The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when the operation failed due to lack of
buffer space. The buffer should be increased to at least
requested_capacity and then the operation retried (guaranteed to
pass second time).
"""
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
class Allocator:
"""Buffer space allocation implementation."""
def __init__(self, capacity):
"""Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
"""
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
self.starts = list()
self.sizes = list()
def set_capacity(self, size):
"""Resize the maximum buffer size.
The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
"""
assert size > self.capacity
self.capacity = size
def alloc(self, size):
"""Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
"""
assert size >= 0
if size == 0:
return 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i + 1]
del self.sizes[i + 1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
def realloc(self, start, size, new_size):
"""Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
"""
assert size >= 0 and new_size >= 0
if new_size == 0:
if size != 0:
self.dealloc(start, size)
return 0
elif size == 0:
return self.alloc(new_size)
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
# Find which block it lives in
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
if not (p >= 0 and size <= alloc_size - p):
print(list(zip(self.starts, self.sizes)))
print(start, size, new_size)
print(p, alloc_start, alloc_size)
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if size == alloc_size - p:
# Region is at end of block. Find how much free space is after
# it.
is_final_block = i == len(self.starts) - 1
if not is_final_block:
free_size = self.starts[i + 1] - (start + size)
else:
free_size = self.capacity - (start + size)
# TODO If region is an entire block being an island in free space,
# can possibly extend in both directions.
if free_size == new_size - size and not is_final_block:
# Merge block with next (region is expanded in place to
# exactly fill the free space)
self.sizes[i] += free_size + self.sizes[i + 1]
del self.starts[i + 1]
del self.sizes[i + 1]
return start
elif free_size > new_size - size:
# Expand region in place
self.sizes[i] += new_size - size
return start
# The block must be repositioned. Dealloc then alloc.
# But don't do this! If alloc fails, we've already silently dealloc'd
# the original block.
# self.dealloc(start, size)
# return self.alloc(new_size)
# It must be alloc'd first. We're not missing an optimisation
# here, because if freeing the block would've allowed for the block to
# be placed in the resulting free space, one of the above in-place
# checks would've found it.
result = self.alloc(new_size)
self.dealloc(start, size)
return result
def dealloc(self, start, size):
"""Free a region of the buffer.
:Parameters:
`start` : int
Starting index of the region.
`size` : int
Size of the region.
"""
assert size >= 0
if size == 0:
return
assert self.starts
# Find which block needs to be split
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
# Assert we left via the break
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if p == 0 and size == alloc_size:
# Remove entire block
del self.starts[i]
del self.sizes[i]
elif p == 0:
# Truncate beginning of block
self.starts[i] += size
self.sizes[i] -= size
elif size == alloc_size - p:
# Truncate end of block
self.sizes[i] -= size
else:
# Reduce size of left side, insert block at right side
# $ = dealloc'd block, # = alloc'd region from same block
#
# <------8------>
# <-5-><-6-><-7->
# 1 2 3 4
# #####$$$$$#####
#
# 1 = alloc_start
# 2 = start
# 3 = start + size
# 4 = alloc_start + alloc_size
# 5 = start - alloc_start = p
# 6 = size
# 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
# 8 = alloc_size
#
self.sizes[i] = p
self.starts.insert(i + 1, start + size)
self.sizes.insert(i + 1, alloc_size - (p + size))
def get_allocated_regions(self):
"""Get a list of (aggregate) allocated regions.
The result of this method is ``(starts, sizes)``, where ``starts`` is
a list of starting indices of the regions and ``sizes`` their
corresponding lengths.
:rtype: (list, list)
"""
# return (starts, sizes); len(starts) == len(sizes)
return (self.starts, self.sizes)
def get_fragmented_free_size(self):
"""Returns the amount of space unused, not including the final
free block.
:rtype: int
"""
if not self.starts:
return 0
# Variation of search for free block.
total_free = 0
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
total_free += alloc_start - free_start
free_start = alloc_start + alloc_size
return total_free
def get_free_size(self):
"""Return the amount of space unused.
:rtype: int
"""
if not self.starts:
return self.capacity
free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
return self.get_fragmented_free_size() + free_end
def get_usage(self):
"""Return fraction of capacity currently allocated.
:rtype: float
"""
return 1. - self.get_free_size() / float(self.capacity)
def get_fragmentation(self):
"""Return fraction of free space that is not expandable.
:rtype: float
"""
free_size = self.get_free_size()
if free_size == 0:
return 0.
return self.get_fragmented_free_size() / float(self.get_free_size())
def _is_empty(self):
return not self.starts
def __str__(self):
return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self))
| AllocatorMemoryException | identifier_name |
allocation.py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
"""Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
"""
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, to reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding a block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. Try not to allocate the
# space they freed too soon (they will likely need to grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
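# Illustrative usage sketch (not part of the original module; the classes it
# exercises are defined below):
#
#   allocator = Allocator(capacity=16)
#   a = allocator.alloc(4)           # -> 0
#   b = allocator.alloc(4)           # -> 4
#   allocator.dealloc(a, 4)          # leaves a hole at indices 0..3
#   try:
#       big = allocator.alloc(32)
#   except AllocatorMemoryException as e:
#       allocator.set_capacity(e.requested_capacity)
#       big = allocator.alloc(32)    # succeeds after the resize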
class AllocatorMemoryException(Exception):
"""The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when an operation fails due to lack of
buffer space. The buffer should be increased to at least
``requested_capacity`` and the operation retried (guaranteed to
succeed the second time).
"""
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
class Allocator:
"""Buffer space allocation implementation."""
def __init__(self, capacity):
"""Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
"""
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
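# The same calculation materialised as a sketch (assumed helper, not part
# of this class; it mirrors the comment above and assumes at least one
# allocated block):
#
#   def free_blocks(allocator):
#       free = []
#       free_start = allocator.starts[0] + allocator.sizes[0]
#       for s, sz in zip(allocator.starts[1:], allocator.sizes[1:]):
#           free.append((free_start, s - free_start))
#           free_start = s + sz
#       free.append((free_start, allocator.capacity - free_start))
#       return free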
self.starts = list()
self.sizes = list()
def set_capacity(self, size):
"""Resize the maximum buffer size.
The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
"""
assert size > self.capacity
self.capacity = size
def alloc(self, size):
"""Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
"""
assert size >= 0
if size == 0:
return 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
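# Note: the search begins after the first allocated block, so any hole
# before starts[0] (left by deallocating the start of the buffer) is not
# considered here.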
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i + 1]
del self.sizes[i + 1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
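# Worked example of the merge case above (sketch):
#   starts=[0, 8], sizes=[3, 4], capacity=16; alloc(5)
#   the free gap (indices 3..7) is exactly 5 wide, so the two blocks are
#   merged: starts=[0], sizes=[12], and 3 is returned.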
def realloc(self, start, size, new_size):
"""Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
"""
assert size >= 0 and new_size >= 0
if new_size == 0:
if size != 0:
self.dealloc(start, size)
return 0
elif size == 0:
return self.alloc(new_size)
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
# Find which block it lives in
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
if not (p >= 0 and size <= alloc_size - p):
print(list(zip(self.starts, self.sizes)))
print(start, size, new_size)
print(p, alloc_start, alloc_size)
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if size == alloc_size - p:
# Region is at end of block. Find how much free space is after
# it.
is_final_block = i == len(self.starts) - 1
if not is_final_block:
free_size = self.starts[i + 1] - (start + size)
else:
free_size = self.capacity - (start + size)
# TODO If the region is an entire block forming an island in free space,
# it could possibly be extended in both directions.
if free_size == new_size - size and not is_final_block:
# Merge block with next (region is expanded in place to
# exactly fill the free space)
self.sizes[i] += free_size + self.sizes[i + 1]
del self.starts[i + 1]
del self.sizes[i + 1]
return start
elif free_size > new_size - size:
# Expand region in place
self.sizes[i] += new_size - size
return start
# The block must be repositioned. Dealloc then alloc.
# But don't do this! If alloc fails, we've already silently dealloc'd
# the original block.
# self.dealloc(start, size)
# return self.alloc(new_size)
# It must be alloc'd first. We're not missing an optimisation
# here, because if freeing the block would've allowed for the block to
# be placed in the resulting free space, one of the above in-place
# checks would've found it.
result = self.alloc(new_size)
self.dealloc(start, size)
return result
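# Example of the in-place grow path (sketch):
#   allocator = Allocator(64)
#   r = allocator.alloc(8)           # r == 0
#   r = allocator.realloc(r, 8, 16)  # grows into trailing free space; r stays 0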
def dealloc(self, start, size):
"""Free a region of the buffer.
:Parameters:
`start` : int
Starting index of the region.
`size` : int
Size of the region.
"""
assert size >= 0
if size == 0:
return
assert self.starts
# Find which block needs to be split
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
# Assert we left via the break
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if p == 0 and size == alloc_size:
# Remove entire block
del self.starts[i]
del self.sizes[i]
elif p == 0:
# Truncate beginning of block
self.starts[i] += size
self.sizes[i] -= size
elif size == alloc_size - p:
# Truncate end of block
self.sizes[i] -= size
else:
# Reduce size of left side, insert block at right side
# $ = dealloc'd block, # = alloc'd region from same block
#
# <------8------>
# <-5-><-6-><-7->
# 1 2 3 4
# #####$$$$$#####
#
# 1 = alloc_start
# 2 = start
# 3 = start + size
# 4 = alloc_start + alloc_size
# 5 = start - alloc_start = p
# 6 = size
# 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
# 8 = alloc_size
#
self.sizes[i] = p
self.starts.insert(i + 1, start + size)
self.sizes.insert(i + 1, alloc_size - (p + size))
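# Worked example of the split case above (sketch):
#   starts=[0], sizes=[15]; dealloc(5, 5)
#   -> starts=[0, 10], sizes=[5, 5]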
def get_allocated_regions(self):
"""Get a list of (aggregate) allocated regions.
The result of this method is ``(starts, sizes)``, where ``starts`` is
a list of starting indices of the regions and ``sizes`` their
corresponding lengths.
:rtype: (list, list)
"""
# return (starts, sizes); len(starts) == len(sizes)
return (self.starts, self.sizes)
def get_fragmented_free_size(self):
"""Returns the amount of space unused, not including the final
free block.
:rtype: int
"""
if not self.starts:
return 0
# Variation of search for free block.
total_free = 0
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
total_free += alloc_start - free_start
free_start = alloc_start + alloc_size
return total_free
def get_free_size(self):
"""Return the amount of space unused.
:rtype: int
"""
if not self.starts:
return self.capacity
free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
return self.get_fragmented_free_size() + free_end
def get_usage(self):
"""Return fraction of capacity currently allocated.
:rtype: float
"""
return 1. - self.get_free_size() / float(self.capacity)
def get_fragmentation(self):
"""Return fraction of free space that is not expandable.
:rtype: float
"""
free_size = self.get_free_size()
if free_size == 0:
return 0.
return self.get_fragmented_free_size() / float(self.get_free_size())
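# Worked example (sketch): capacity=10, starts=[0, 5], sizes=[3, 2]
#   fragmented free = 2 (indices 3..4); total free = 5 (2 plus indices 7..9)
#   -> get_fragmentation() == 2 / 5 == 0.4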
def _is_empty(self):
|
def __str__(self):
return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self))
| return not self.starts | identifier_body |
test_mlresult.py | import os
import unittest
import numpy as np
from tfsnippet.examples.utils import MLResults
from tfsnippet.utils import TemporaryDirectory
def head_of_file(path, n):
with open(path, 'rb') as f:
return f.read(n)
class MLResultTestCase(unittest.TestCase):
def test_imwrite(self):
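# The expected byte strings below are the standard file signatures
# ("magic bytes"): BMP files begin with b'BM' (0x42 0x4d), PNG with
# b'\x89PNG\r\n\x1a\n', and JPEG streams with 0xff 0xd8 0xff.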
| with TemporaryDirectory() as tmpdir:
results = MLResults(tmpdir)
im = np.zeros([32, 32], dtype=np.uint8)
im[16:, ...] = 255
results.save_image('test.bmp', im)
file_path = os.path.join(tmpdir, 'test.bmp')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 2), b'\x42\x4d')
results.save_image('test.png', im)
file_path = os.path.join(tmpdir, 'test.png')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 8),
b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a')
results.save_image('test.jpg', im)
file_path = os.path.join(tmpdir, 'test.jpg')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 3), b'\xff\xd8\xff') | identifier_body |
|
test_mlresult.py | import os
import unittest
import numpy as np
from tfsnippet.examples.utils import MLResults
from tfsnippet.utils import TemporaryDirectory | with open(path, 'rb') as f:
return f.read(n)
class MLResultTestCase(unittest.TestCase):
def test_imwrite(self):
with TemporaryDirectory() as tmpdir:
results = MLResults(tmpdir)
im = np.zeros([32, 32], dtype=np.uint8)
im[16:, ...] = 255
results.save_image('test.bmp', im)
file_path = os.path.join(tmpdir, 'test.bmp')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 2), b'\x42\x4d')
results.save_image('test.png', im)
file_path = os.path.join(tmpdir, 'test.png')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 8),
b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a')
results.save_image('test.jpg', im)
file_path = os.path.join(tmpdir, 'test.jpg')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 3), b'\xff\xd8\xff') |
def head_of_file(path, n): | random_line_split |
test_mlresult.py | import os
import unittest
import numpy as np
from tfsnippet.examples.utils import MLResults
from tfsnippet.utils import TemporaryDirectory
def head_of_file(path, n):
with open(path, 'rb') as f:
return f.read(n)
class MLResultTestCase(unittest.TestCase):
def | (self):
with TemporaryDirectory() as tmpdir:
results = MLResults(tmpdir)
im = np.zeros([32, 32], dtype=np.uint8)
im[16:, ...] = 255
results.save_image('test.bmp', im)
file_path = os.path.join(tmpdir, 'test.bmp')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 2), b'\x42\x4d')
results.save_image('test.png', im)
file_path = os.path.join(tmpdir, 'test.png')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 8),
b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a')
results.save_image('test.jpg', im)
file_path = os.path.join(tmpdir, 'test.jpg')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 3), b'\xff\xd8\xff')
| test_imwrite | identifier_name |
test_sequence.py | import os
import re
from nose.tools import raises
import seqpoet
class TestSequence:
def setup(self):
self.seq1 = 'ACATacacagaATAgagaCacata'
self.illegal = 'agagcatgcacthisisnotcorrect'
def test_sequence_length(self):
s = seqpoet.Sequence(self.seq1)
assert len(s) == len(self.seq1)
def test_casing(self):
s = seqpoet.Sequence(self.seq1)
assert re.match('^[acgt]+$', str(s))
def test_reverse_complement(self):
s = seqpoet.Sequence(self.seq1)
s2 = seqpoet.Sequence('acct')
assert s.revcomp() == 'tatgtgtctctattctgtgtatgt', \
'"{0}" is not "tatgtgtctctattctgtgtatgt"'.format(s.revcomp().seq)
assert s2.revcomp() == 'aggt', \
'"{0}" is not "aggt"'.format(s2.revcomp().seq)
def test_str(self):
s = seqpoet.Sequence(self.seq1)
assert str(s) == self.seq1.lower()
def test_repr(self):
s = seqpoet.Sequence(self.seq1)
assert repr(s) == '<Sequence: acata...>'
assert repr(s.revcomp()) == '<Sequence: tatgt...>'
def test_indexing(self):
s = seqpoet.Sequence(self.seq1)
assert s[4] == 'a'
assert s[:5] == 'acata'
assert s[-6:] == 'cacata'
assert s[4:8] == 'acac'
def test_equality(self):
s = seqpoet.Sequence(self.seq1)
assert s == self.seq1.lower()
assert s[:3] == seqpoet.Sequence(self.seq1[:3])
@raises(ValueError)
def test_illegal_characters(self):
| s = seqpoet.Sequence(self.illegal) | identifier_body |
|
test_sequence.py | import os
import re
from nose.tools import raises
import seqpoet
class TestSequence:
def setup(self):
self.seq1 = 'ACATacacagaATAgagaCacata'
self.illegal = 'agagcatgcacthisisnotcorrect'
def test_sequence_length(self):
s = seqpoet.Sequence(self.seq1)
assert len(s) == len(self.seq1)
def test_casing(self):
s = seqpoet.Sequence(self.seq1)
assert re.match('^[acgt]+$', str(s))
def test_reverse_complement(self):
s = seqpoet.Sequence(self.seq1)
s2 = seqpoet.Sequence('acct')
assert s.revcomp() == 'tatgtgtctctattctgtgtatgt', \
'"{0}" is not "tatgtgtctctattctgtgtatgt"'.format(s.revcomp().seq)
assert s2.revcomp() == 'aggt', \
'"{0}" is not "aggt"'.format(s2.revcomp().seq)
def test_str(self): | assert str(s) == self.seq1.lower()
def test_repr(self):
s = seqpoet.Sequence(self.seq1)
assert repr(s) == '<Sequence: acata...>'
assert repr(s.revcomp()) == '<Sequence: tatgt...>'
def test_indexing(self):
s = seqpoet.Sequence(self.seq1)
assert s[4] == 'a'
assert s[:5] == 'acata'
assert s[-6:] == 'cacata'
assert s[4:8] == 'acac'
def test_equality(self):
s = seqpoet.Sequence(self.seq1)
assert s == self.seq1.lower()
assert s[:3] == seqpoet.Sequence(self.seq1[:3])
@raises(ValueError)
def test_illegal_characters(self):
s = seqpoet.Sequence(self.illegal) | s = seqpoet.Sequence(self.seq1) | random_line_split |
test_sequence.py | import os
import re
from nose.tools import raises
import seqpoet
class TestSequence:
def setup(self):
self.seq1 = 'ACATacacagaATAgagaCacata'
self.illegal = 'agagcatgcacthisisnotcorrect'
def test_sequence_length(self):
s = seqpoet.Sequence(self.seq1)
assert len(s) == len(self.seq1)
def test_casing(self):
s = seqpoet.Sequence(self.seq1)
assert re.match('^[acgt]+$', str(s))
def test_reverse_complement(self):
s = seqpoet.Sequence(self.seq1)
s2 = seqpoet.Sequence('acct')
assert s.revcomp() == 'tatgtgtctctattctgtgtatgt', \
'"{0}" is not "tatgtgtctctattctgtgtatgt"'.format(s.revcomp().seq)
assert s2.revcomp() == 'aggt', \
'"{0}" is not "aggt"'.format(s2.revcomp().seq)
def | (self):
s = seqpoet.Sequence(self.seq1)
assert str(s) == self.seq1.lower()
def test_repr(self):
s = seqpoet.Sequence(self.seq1)
assert repr(s) == '<Sequence: acata...>'
assert repr(s.revcomp()) == '<Sequence: tatgt...>'
def test_indexing(self):
s = seqpoet.Sequence(self.seq1)
assert s[4] == 'a'
assert s[:5] == 'acata'
assert s[-6:] == 'cacata'
assert s[4:8] == 'acac'
def test_equality(self):
s = seqpoet.Sequence(self.seq1)
assert s == self.seq1.lower()
assert s[:3] == seqpoet.Sequence(self.seq1[:3])
@raises(ValueError)
def test_illegal_characters(self):
s = seqpoet.Sequence(self.illegal)
| test_str | identifier_name |
Result.ts | import { Cache, StateObject } from "../../collections/stateful";
import { exists } from "../../util/object";
import { parseXSD, XSDSchema } from "../../util/SAXParser";
import { DFULogicalFile } from "../services/WsDFU";
import { ECLResult, ECLSchemas, Service, WUResultRequest, WUResultResponse } from "../services/WsWorkunits";
export interface ECLResultEx extends ECLResult {
Wuid: string;
ResultViews: any[];
}
export class Result extends StateObject<ECLResultEx & DFULogicalFile, ECLResultEx | DFULogicalFile> implements ECLResultEx {
protected connection: Service;
protected xsdSchema: XSDSchema;
get properties(): ECLResult { return this.get(); }
get Wuid(): string { return this.get("Wuid"); }
get Name(): string { return this.get("Name"); }
get Sequence(): number { return this.get("Sequence"); }
get Value(): string { return this.get("Value"); }
get Link(): string { return this.get("Link"); }
get FileName(): string { return this.get("FileName"); }
get IsSupplied(): boolean { return this.get("IsSupplied"); }
get ShowFileContent() { return this.get("ShowFileContent"); }
get | (): number { return this.get("Total"); }
get ECLSchemas(): ECLSchemas { return this.get("ECLSchemas"); }
get NodeGroup(): string { return this.get("NodeGroup"); }
get ResultViews(): any[] { return this.get("ResultViews"); }
constructor(connection: Service | string, wuid: string, eclResult: ECLResult, resultViews: any[]) {
super();
if (connection instanceof Service) {
this.connection = connection;
} else {
this.connection = new Service(connection);
}
this.set({
Wuid: wuid,
ResultViews: resultViews,
...eclResult
});
}
isComplete() {
return this.Total !== -1;
}
fetchXMLSchema(): Promise<XSDSchema> {
if (this.xsdSchema) {
return Promise.resolve(this.xsdSchema);
}
return this.WUResult().then((response) => {
if (exists("Result.XmlSchema.xml", response)) {
this.xsdSchema = parseXSD(response.Result.XmlSchema.xml);
return this.xsdSchema;
}
return this;
});
}
fetchResult(): Promise<any[]> {
return this.WUResult(0, -1, true).then((response) => {
if (exists("Result.Row", response)) {
return response.Result.Row;
}
return [];
});
}
protected WUResult(start: number = 0, count: number = 1, suppressXmlSchema: boolean = false): Promise<WUResultResponse> {
const request: WUResultRequest = <WUResultRequest>{};
if (this.Wuid && this.Sequence !== undefined) {
request.Wuid = this.Wuid;
request.Sequence = this.Sequence;
} else if (this.Name && this.NodeGroup) {
request.LogicalName = this.Name;
request.Cluster = this.NodeGroup;
} else if (this.Name) {
request.LogicalName = this.Name;
}
request.Start = start;
request.Count = count;
request.SuppressXmlSchema = suppressXmlSchema;
return this.connection.WUResult(request).then((response) => {
return response;
});
}
}
export class ResultCache extends Cache<ECLResult, Result> {
constructor() {
super((obj) => {
return Cache.hash([obj.Sequence, obj.Name, obj.FileName]);
});
}
}
| Total | identifier_name |
Result.ts | import { Cache, StateObject } from "../../collections/stateful";
import { exists } from "../../util/object";
import { parseXSD, XSDSchema } from "../../util/SAXParser";
import { DFULogicalFile } from "../services/WsDFU";
import { ECLResult, ECLSchemas, Service, WUResultRequest, WUResultResponse } from "../services/WsWorkunits";
export interface ECLResultEx extends ECLResult {
Wuid: string;
ResultViews: any[];
}
export class Result extends StateObject<ECLResultEx & DFULogicalFile, ECLResultEx | DFULogicalFile> implements ECLResultEx {
protected connection: Service;
protected xsdSchema: XSDSchema;
get properties(): ECLResult { return this.get(); }
get Wuid(): string { return this.get("Wuid"); }
get Name(): string { return this.get("Name"); }
get Sequence(): number { return this.get("Sequence"); }
get Value(): string { return this.get("Value"); }
get Link(): string { return this.get("Link"); }
get FileName(): string { return this.get("FileName"); }
get IsSupplied(): boolean { return this.get("IsSupplied"); }
get ShowFileContent() { return this.get("ShowFileContent"); }
get Total(): number { return this.get("Total"); }
get ECLSchemas(): ECLSchemas |
get NodeGroup(): string { return this.get("NodeGroup"); }
get ResultViews(): any[] { return this.get("ResultViews"); }
constructor(connection: Service | string, wuid: string, eclResult: ECLResult, resultViews: any[]) {
super();
if (connection instanceof Service) {
this.connection = connection;
} else {
this.connection = new Service(connection);
}
this.set({
Wuid: wuid,
ResultViews: resultViews,
...eclResult
});
}
isComplete() {
return this.Total !== -1;
}
fetchXMLSchema(): Promise<XSDSchema> {
if (this.xsdSchema) {
return Promise.resolve(this.xsdSchema);
}
return this.WUResult().then((response) => {
if (exists("Result.XmlSchema.xml", response)) {
this.xsdSchema = parseXSD(response.Result.XmlSchema.xml);
return this.xsdSchema;
}
return this;
});
}
fetchResult(): Promise<any[]> {
return this.WUResult(0, -1, true).then((response) => {
if (exists("Result.Row", response)) {
return response.Result.Row;
}
return [];
});
}
protected WUResult(start: number = 0, count: number = 1, suppressXmlSchema: boolean = false): Promise<WUResultResponse> {
const request: WUResultRequest = <WUResultRequest>{};
if (this.Wuid && this.Sequence !== undefined) {
request.Wuid = this.Wuid;
request.Sequence = this.Sequence;
} else if (this.Name && this.NodeGroup) {
request.LogicalName = this.Name;
request.Cluster = this.NodeGroup;
} else if (this.Name) {
request.LogicalName = this.Name;
}
request.Start = start;
request.Count = count;
request.SuppressXmlSchema = suppressXmlSchema;
return this.connection.WUResult(request).then((response) => {
return response;
});
}
}
export class ResultCache extends Cache<ECLResult, Result> {
constructor() {
super((obj) => {
return Cache.hash([obj.Sequence, obj.Name, obj.FileName]);
});
}
}
| { return this.get("ECLSchemas"); } | identifier_body |
Result.ts | import { Cache, StateObject } from "../../collections/stateful";
import { exists } from "../../util/object";
import { parseXSD, XSDSchema } from "../../util/SAXParser";
import { DFULogicalFile } from "../services/WsDFU";
import { ECLResult, ECLSchemas, Service, WUResultRequest, WUResultResponse } from "../services/WsWorkunits";
export interface ECLResultEx extends ECLResult {
Wuid: string;
ResultViews: any[];
}
export class Result extends StateObject<ECLResultEx & DFULogicalFile, ECLResultEx | DFULogicalFile> implements ECLResultEx {
protected connection: Service;
protected xsdSchema: XSDSchema;
get properties(): ECLResult { return this.get(); }
get Wuid(): string { return this.get("Wuid"); }
get Name(): string { return this.get("Name"); }
get Sequence(): number { return this.get("Sequence"); }
get Value(): string { return this.get("Value"); }
get Link(): string { return this.get("Link"); }
get FileName(): string { return this.get("FileName"); }
get IsSupplied(): boolean { return this.get("IsSupplied"); }
get ShowFileContent() { return this.get("ShowFileContent"); }
get Total(): number { return this.get("Total"); }
get ECLSchemas(): ECLSchemas { return this.get("ECLSchemas"); }
get NodeGroup(): string { return this.get("NodeGroup"); }
get ResultViews(): any[] { return this.get("ResultViews"); }
constructor(connection: Service | string, wuid: string, eclResult: ECLResult, resultViews: any[]) {
super();
if (connection instanceof Service) {
this.connection = connection;
} else {
this.connection = new Service(connection);
}
this.set({
Wuid: wuid,
ResultViews: resultViews,
...eclResult
});
}
isComplete() {
return this.Total !== -1;
}
fetchXMLSchema(): Promise<XSDSchema> {
if (this.xsdSchema) |
return this.WUResult().then((response) => {
if (exists("Result.XmlSchema.xml", response)) {
this.xsdSchema = parseXSD(response.Result.XmlSchema.xml);
return this.xsdSchema;
}
return this;
});
}
fetchResult(): Promise<any[]> {
return this.WUResult(0, -1, true).then((response) => {
if (exists("Result.Row", response)) {
return response.Result.Row;
}
return [];
});
}
protected WUResult(start: number = 0, count: number = 1, suppressXmlSchema: boolean = false): Promise<WUResultResponse> {
const request: WUResultRequest = <WUResultRequest>{};
if (this.Wuid && this.Sequence !== undefined) {
request.Wuid = this.Wuid;
request.Sequence = this.Sequence;
} else if (this.Name && this.NodeGroup) {
request.LogicalName = this.Name;
request.Cluster = this.NodeGroup;
} else if (this.Name) {
request.LogicalName = this.Name;
}
request.Start = start;
request.Count = count;
request.SuppressXmlSchema = suppressXmlSchema;
return this.connection.WUResult(request).then((response) => {
return response;
});
}
}
export class ResultCache extends Cache<ECLResult, Result> {
constructor() {
super((obj) => {
return Cache.hash([obj.Sequence, obj.Name, obj.FileName]);
});
}
}
| {
return Promise.resolve(this.xsdSchema);
} | conditional_block |
Result.ts | import { Cache, StateObject } from "../../collections/stateful";
import { exists } from "../../util/object";
import { parseXSD, XSDSchema } from "../../util/SAXParser";
import { DFULogicalFile } from "../services/WsDFU";
import { ECLResult, ECLSchemas, Service, WUResultRequest, WUResultResponse } from "../services/WsWorkunits";
export interface ECLResultEx extends ECLResult {
Wuid: string;
ResultViews: any[];
}
export class Result extends StateObject<ECLResultEx & DFULogicalFile, ECLResultEx | DFULogicalFile> implements ECLResultEx {
protected connection: Service;
protected xsdSchema: XSDSchema;
get properties(): ECLResult { return this.get(); }
get Wuid(): string { return this.get("Wuid"); }
get Name(): string { return this.get("Name"); }
get Sequence(): number { return this.get("Sequence"); }
get Value(): string { return this.get("Value"); }
get Link(): string { return this.get("Link"); }
get FileName(): string { return this.get("FileName"); }
get IsSupplied(): boolean { return this.get("IsSupplied"); }
get ShowFileContent() { return this.get("ShowFileContent"); }
get Total(): number { return this.get("Total"); }
get ECLSchemas(): ECLSchemas { return this.get("ECLSchemas"); }
get NodeGroup(): string { return this.get("NodeGroup"); }
get ResultViews(): any[] { return this.get("ResultViews"); }
constructor(connection: Service | string, wuid: string, eclResult: ECLResult, resultViews: any[]) {
super();
if (connection instanceof Service) {
this.connection = connection;
} else {
this.connection = new Service(connection);
}
this.set({
Wuid: wuid, | }
isComplete() {
return this.Total !== -1;
}
fetchXMLSchema(): Promise<XSDSchema> {
if (this.xsdSchema) {
return Promise.resolve(this.xsdSchema);
}
return this.WUResult().then((response) => {
if (exists("Result.XmlSchema.xml", response)) {
this.xsdSchema = parseXSD(response.Result.XmlSchema.xml);
return this.xsdSchema;
}
return this;
});
}
fetchResult(): Promise<any[]> {
return this.WUResult(0, -1, true).then((response) => {
if (exists("Result.Row", response)) {
return response.Result.Row;
}
return [];
});
}
protected WUResult(start: number = 0, count: number = 1, suppressXmlSchema: boolean = false): Promise<WUResultResponse> {
const request: WUResultRequest = <WUResultRequest>{};
if (this.Wuid && this.Sequence !== undefined) {
request.Wuid = this.Wuid;
request.Sequence = this.Sequence;
} else if (this.Name && this.NodeGroup) {
request.LogicalName = this.Name;
request.Cluster = this.NodeGroup;
} else if (this.Name) {
request.LogicalName = this.Name;
}
request.Start = start;
request.Count = count;
request.SuppressXmlSchema = suppressXmlSchema;
return this.connection.WUResult(request).then((response) => {
return response;
});
}
}
export class ResultCache extends Cache<ECLResult, Result> {
constructor() {
super((obj) => {
return Cache.hash([obj.Sequence, obj.Name, obj.FileName]);
});
}
} | ResultViews: resultViews,
...eclResult
}); | random_line_split |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) {
(self.0, self.1, self.2, self.3, self.4, self.5)
}
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
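// Note that `{:x}` does not zero-pad, so MacAddr(0, 0, 0, 0, 0, 0) prints as
// "0:0:0:0:0:0"; a zero-padded form would use `{:02x}` instead.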
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which error occurs is an
// implementation issue rather than actually defined - is it useful to provide
// these errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> { | let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len() != 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32) != 0
}
}
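// Illustrative usage (sketch; `get_network_interfaces` is defined below):
//
//     for iface in get_network_interfaces() {
//         if !iface.is_loopback() {
//             println!("{}: {:?} {:?}", iface.name, iface.mac, iface.ips);
//         }
//     }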
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs) != 0 {
return ifaces;
}
let mut addr = addrs;
while !addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if !found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while !cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while !ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
} | random_line_split |
|
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn | (a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) {
(self.0, self.1, self.2, self.3, self.4, self.5)
}
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which error occurs is an
// implementation issue rather than actually defined - is it useful to provide
// these errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> {
let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len() != 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32) != 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs) != 0 {
return ifaces;
}
let mut addr = addrs;
while !addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if !found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while !cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while !ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
}
| new | identifier_name |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) {
(self.0, self.1, self.2, self.3, self.4, self.5)
}
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which error occurs is an
// implementation issue rather than actually defined - is it useful to provide
// these errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> {
let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len() != 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32) != 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET | else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs) != 0 {
return ifaces;
}
let mut addr = addrs;
while !addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if !found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while !cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while !ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
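    // e.g. splitting on "\0\0" first isolates "adapter1\0adapter2", and the
    // per-'\0' split below then yields the individual adapter names.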
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
}
| {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} | conditional_block |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) |
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
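// NB: `{:x}` does not zero-pad, so MacAddr(0, 0x0A, 0, 0, 0, 1) displays as
// "0:a:0:0:0:1" rather than "00:0a:00:00:00:01"; `{:02x}` would pad.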
// FIXME Is this the right way to do this? Which error is reported first is an
// implementation detail rather than defined behaviour - is it useful to
// provide these errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> {
let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len() != 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
    /// The IP addresses associated with the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
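    /// Panics if the interface has no MAC address (i.e. `mac` is `None`).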
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32) != 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET {
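            // An AF_PACKET entry is a link-layer `sockaddr_ll`; for
            // Ethernet-style interfaces the first six bytes of `sll_addr`
            // hold the MAC address.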
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
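            // `sockaddr_dl` packs the interface name followed by the
            // link-layer address into `sdl_data`, so the MAC address starts
            // `sdl_nlen` bytes in.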
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
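        // `addrs` is overwritten by getifaddrs before it is ever read (the
        // early return covers the failure case); modern Rust would reach for
        // `mem::MaybeUninit` instead of the deprecated `mem::uninitialized`.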
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs) != 0 {
return ifaces;
}
let mut addr = addrs;
while !addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if !found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while !cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while !ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
}
| {
(self.0, self.1, self.2, self.3, self.4, self.5)
} | identifier_body |
context.py | """
JSON-LD contexts.
"""
from .namespaces import ns_mgr, D
base = {
"@base": str(D),
"a": "@type",
"uri": "@id",
"label": "rdfs:label",
}
#set namespaces from the ns_mgr |
#BCITE publications
publication = {
"title": "rdfs:label",
"authors": "bcite:authorList",
'pmid': 'bcite:pmid',
'doi': 'bcite:doi',
"pmcid": "bcite:pmcid",
"issue": "bcite:issue",
"volume": "bcite:volume",
"issn": "bcite:issn",
"eissn": "bcite:eissn",
"book": "bcite:book",
"published_in": "bcite:publishedIn",
"date": {
"@id": "bcite:date",
"@type": "http://www.w3.org/2001/XMLSchema#date"
},
"url": "bcite:url",
"pages": "bcite:pages",
"venue": {
"@id": "bcite:hasVenue",
"@type": "@id",
},
"contributor": {
"@id": "bcite:hasContributor",
"@type": "@id",
}
}
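# As a sketch (assuming a JSON-LD processor such as pyld), a record like
#   {"@context": {**base, **publication}, "uri": "pub/123", "title": "A title"}
# expands "title" to rdfs:label and resolves "uri" to an IRI under @base.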
#Brown delegate editors
delegate = {
"first": "foaf:firstName",
"last": "foaf:lastName",
"short_id": "blocal:shortId",
"ou": "blocal:orgUnit",
"netId": "blocal:netId",
} | for prefix, iri in ns_mgr.namespaces():
base[prefix] = iri.toPython() | random_line_split |
context.py | """
JSON-LD contexts.
"""
from .namespaces import ns_mgr, D
base = {
"@base": str(D),
"a": "@type",
"uri": "@id",
"label": "rdfs:label",
}
#set namespaces from the ns_mgr
for prefix, iri in ns_mgr.namespaces():
|
#BCITE publications
publication = {
"title": "rdfs:label",
"authors": "bcite:authorList",
'pmid': 'bcite:pmid',
'doi': 'bcite:doi',
"pmcid": "bcite:pmcid",
"issue": "bcite:issue",
"volume": "bcite:volume",
"issn": "bcite:issn",
"eissn": "bcite:eissn",
"book": "bcite:book",
"published_in": "bcite:publishedIn",
"date": {
"@id": "bcite:date",
"@type": "http://www.w3.org/2001/XMLSchema#date"
},
"url": "bcite:url",
"pages": "bcite:pages",
"venue": {
"@id": "bcite:hasVenue",
"@type": "@id",
},
"contributor": {
"@id": "bcite:hasContributor",
"@type": "@id",
}
}
#Brown delegate editors
delegate = {
"first": "foaf:firstName",
"last": "foaf:lastName",
"short_id": "blocal:shortId",
"ou": "blocal:orgUnit",
"netId": "blocal:netId",
}
| base[prefix] = iri.toPython() | conditional_block |
test_pnutils.py | # Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.pnutils module
"""
import unittest
import numpy
from pycbc.pnutils import *
from pycbc.scheme import *
from utils import parse_args_cpu_only, simple_exit
# We only need CPU tests
parse_args_cpu_only("PN Utilities")
class TestUtils(unittest.TestCase):
def test_mass1_mass2_to_tau0_tau3(self):
result = mass1_mass2_to_tau0_tau3(3.0,5.0,15.0)
answer = (63.039052988077955, 2.353532999897545)
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mtotal_eta(self):
result = tau0_tau3_to_mtotal_eta(93.84928959285253,2.9198487498891126,20.0)
answer = [5., 4.*1./5./5.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mass1_mass2(self):
result = tau0_tau3_to_mass1_mass2(12.410035910174642,0.9266455525603574,30.0)
answer = [6., 2.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mtotal_eta(self):
result = mass1_mass2_to_mtotal_eta(5,10)
answer = [15.0, 0.22222222222222221]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mchirp_eta(self):
result = mass1_mass2_to_mchirp_eta(5,10)
answer = [6.0836434189320574, 0.22222222222222224]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
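        # Sanity check of the expected values: with m1 = 5 and m2 = 10,
        #   eta = m1*m2 / (m1 + m2)**2 = 50 / 225 ~= 0.22222
        #   M_chirp = (m1*m2)**(3/5) / (m1 + m2)**(1/5) ~= 6.08364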
def test_mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(self):
# with no spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(1.4, 1.4,
0., 0.)
for i in range(3):
self.assertAlmostEqual(result[i], 0, places=6)
# with spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(10., 1.4,
0.9, 0.1)
answer = [7.208723197, 3.251802285, 243.2697314]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(5., 5.,
0.5, -0.7)
answer = [-0.7833333333, 0.07250000000, -24.59479718]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
# using array arguments
mass1 = numpy.array([1.4, 10., 5., 5.])
mass2 = numpy.array([1.4, 1.4, 5., 5.])
spin1 = numpy.array([0., 0.9, 0.5, -0.7])
spin2 = numpy.array([0., 0.1, -0.7, 0.5])
answer = numpy.array([
[0., 0., 0.],
[7.208723197, 3.251802285, 243.2697314],
[-0.7833333333, 0.07250000000, -24.59479718],
[-0.7833333333, 0.07250000000, -24.59479718]
]).T
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(mass1, mass2,
spin1, spin2)
for error in (result - answer).ravel():
self.assertAlmostEqual(error, 0, places=6)
suite = unittest.TestSuite() | if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results) | suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))
| random_line_split |
test_pnutils.py | # Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.pnutils module
"""
import unittest
import numpy
from pycbc.pnutils import *
from pycbc.scheme import *
from utils import parse_args_cpu_only, simple_exit
# We only need CPU tests
parse_args_cpu_only("PN Utilities")
class TestUtils(unittest.TestCase):
def | (self):
result = mass1_mass2_to_tau0_tau3(3.0,5.0,15.0)
answer = (63.039052988077955, 2.353532999897545)
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mtotal_eta(self):
result = tau0_tau3_to_mtotal_eta(93.84928959285253,2.9198487498891126,20.0)
answer = [5., 4.*1./5./5.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mass1_mass2(self):
result = tau0_tau3_to_mass1_mass2(12.410035910174642,0.9266455525603574,30.0)
answer = [6., 2.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mtotal_eta(self):
result = mass1_mass2_to_mtotal_eta(5,10)
answer = [15.0, 0.22222222222222221]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
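        # With m1 = 5 and m2 = 10: M_total = 15 and
        # eta = m1*m2 / (m1 + m2)**2 = 50 / 225 ~= 0.22222 (eta peaks at
        # 0.25 for equal masses).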
def test_mass1_mass2_to_mchirp_eta(self):
result = mass1_mass2_to_mchirp_eta(5,10)
answer = [6.0836434189320574, 0.22222222222222224]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(self):
# with no spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(1.4, 1.4,
0., 0.)
for i in range(3):
self.assertAlmostEqual(result[i], 0, places=6)
# with spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(10., 1.4,
0.9, 0.1)
answer = [7.208723197, 3.251802285, 243.2697314]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(5., 5.,
0.5, -0.7)
answer = [-0.7833333333, 0.07250000000, -24.59479718]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
# using array arguments
mass1 = numpy.array([1.4, 10., 5., 5.])
mass2 = numpy.array([1.4, 1.4, 5., 5.])
spin1 = numpy.array([0., 0.9, 0.5, -0.7])
spin2 = numpy.array([0., 0.1, -0.7, 0.5])
answer = numpy.array([
[0., 0., 0.],
[7.208723197, 3.251802285, 243.2697314],
[-0.7833333333, 0.07250000000, -24.59479718],
[-0.7833333333, 0.07250000000, -24.59479718]
]).T
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(mass1, mass2,
spin1, spin2)
for error in (result - answer).ravel():
self.assertAlmostEqual(error, 0, places=6)
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))
if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results)
| test_mass1_mass2_to_tau0_tau3 | identifier_name |
test_pnutils.py | # Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.pnutils module
"""
import unittest
import numpy
from pycbc.pnutils import *
from pycbc.scheme import *
from utils import parse_args_cpu_only, simple_exit
# We only need CPU tests
parse_args_cpu_only("PN Utilities")
class TestUtils(unittest.TestCase):
def test_mass1_mass2_to_tau0_tau3(self):
result = mass1_mass2_to_tau0_tau3(3.0,5.0,15.0)
answer = (63.039052988077955, 2.353532999897545)
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mtotal_eta(self):
result = tau0_tau3_to_mtotal_eta(93.84928959285253,2.9198487498891126,20.0)
answer = [5., 4.*1./5./5.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mass1_mass2(self):
result = tau0_tau3_to_mass1_mass2(12.410035910174642,0.9266455525603574,30.0)
answer = [6., 2.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mtotal_eta(self):
result = mass1_mass2_to_mtotal_eta(5,10)
answer = [15.0, 0.22222222222222221]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mchirp_eta(self):
result = mass1_mass2_to_mchirp_eta(5,10)
answer = [6.0836434189320574, 0.22222222222222224]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(self):
# with no spin
|
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))
if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results)
| result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(1.4, 1.4,
0., 0.)
for i in range(3):
self.assertAlmostEqual(result[i], 0, places=6)
# with spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(10., 1.4,
0.9, 0.1)
answer = [7.208723197, 3.251802285, 243.2697314]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(5., 5.,
0.5, -0.7)
answer = [-0.7833333333, 0.07250000000, -24.59479718]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
# using array arguments
mass1 = numpy.array([1.4, 10., 5., 5.])
mass2 = numpy.array([1.4, 1.4, 5., 5.])
spin1 = numpy.array([0., 0.9, 0.5, -0.7])
spin2 = numpy.array([0., 0.1, -0.7, 0.5])
answer = numpy.array([
[0., 0., 0.],
[7.208723197, 3.251802285, 243.2697314],
[-0.7833333333, 0.07250000000, -24.59479718],
[-0.7833333333, 0.07250000000, -24.59479718]
]).T
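        # The last two rows coincide: with equal masses the PN spin
        # coefficients are symmetric under exchanging the component spins.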
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(mass1, mass2,
spin1, spin2)
for error in (result - answer).ravel():
self.assertAlmostEqual(error, 0, places=6) | identifier_body |
test_pnutils.py | # Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.pnutils module
"""
import unittest
import numpy
from pycbc.pnutils import *
from pycbc.scheme import *
from utils import parse_args_cpu_only, simple_exit
# We only need CPU tests
parse_args_cpu_only("PN Utilities")
class TestUtils(unittest.TestCase):
def test_mass1_mass2_to_tau0_tau3(self):
result = mass1_mass2_to_tau0_tau3(3.0,5.0,15.0)
answer = (63.039052988077955, 2.353532999897545)
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mtotal_eta(self):
result = tau0_tau3_to_mtotal_eta(93.84928959285253,2.9198487498891126,20.0)
answer = [5., 4.*1./5./5.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_tau0_tau3_to_mass1_mass2(self):
result = tau0_tau3_to_mass1_mass2(12.410035910174642,0.9266455525603574,30.0)
answer = [6., 2.]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mtotal_eta(self):
result = mass1_mass2_to_mtotal_eta(5,10)
answer = [15.0, 0.22222222222222221]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_to_mchirp_eta(self):
result = mass1_mass2_to_mchirp_eta(5,10)
answer = [6.0836434189320574, 0.22222222222222224]
self.assertAlmostEqual(result[0]/answer[0],1,places=6)
self.assertAlmostEqual(result[1]/answer[1],1,places=6)
def test_mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(self):
# with no spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(1.4, 1.4,
0., 0.)
for i in range(3):
|
# with spin
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(10., 1.4,
0.9, 0.1)
answer = [7.208723197, 3.251802285, 243.2697314]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(5., 5.,
0.5, -0.7)
answer = [-0.7833333333, 0.07250000000, -24.59479718]
for r, a in zip(result, answer):
self.assertAlmostEqual(r / a, 1, places=6)
# using array arguments
mass1 = numpy.array([1.4, 10., 5., 5.])
mass2 = numpy.array([1.4, 1.4, 5., 5.])
spin1 = numpy.array([0., 0.9, 0.5, -0.7])
spin2 = numpy.array([0., 0.1, -0.7, 0.5])
answer = numpy.array([
[0., 0., 0.],
[7.208723197, 3.251802285, 243.2697314],
[-0.7833333333, 0.07250000000, -24.59479718],
[-0.7833333333, 0.07250000000, -24.59479718]
]).T
result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(mass1, mass2,
spin1, spin2)
for error in (result - answer).ravel():
self.assertAlmostEqual(error, 0, places=6)
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))
if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results)
| self.assertAlmostEqual(result[i], 0, places=6) | conditional_block |
forget_room.rs | //! `POST /_matrix/client/*/rooms/{roomId}/forget`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidforget
use ruma_common::{api::ruma_api, RoomId};
ruma_api! {
metadata: {
description: "Forget a room.",
method: POST,
name: "forget_room",
r0_path: "/_matrix/client/r0/rooms/:room_id/forget",
stable_path: "/_matrix/client/v3/rooms/:room_id/forget",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to forget.
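        /// (`#[ruma_api(path)]` interpolates this field into the `:room_id`
        /// segment of the request path.)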
#[ruma_api(path)]
pub room_id: &'a RoomId,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self { room_id }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn | () -> Self {
Self {}
}
}
}
| new | identifier_name |
forget_room.rs | //! `POST /_matrix/client/*/rooms/{roomId}/forget`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidforget
use ruma_common::{api::ruma_api, RoomId};
ruma_api! {
metadata: {
description: "Forget a room.",
method: POST,
name: "forget_room",
r0_path: "/_matrix/client/r0/rooms/:room_id/forget",
stable_path: "/_matrix/client/v3/rooms/:room_id/forget",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to forget.
#[ruma_api(path)]
pub room_id: &'a RoomId,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self { room_id }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self |
}
}
| {
Self {}
} | identifier_body |
forget_room.rs | //! `POST /_matrix/client/*/rooms/{roomId}/forget`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidforget
use ruma_common::{api::ruma_api, RoomId};
ruma_api! {
metadata: {
description: "Forget a room.",
method: POST,
name: "forget_room",
r0_path: "/_matrix/client/r0/rooms/:room_id/forget",
stable_path: "/_matrix/client/v3/rooms/:room_id/forget",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to forget.
#[ruma_api(path)]
pub room_id: &'a RoomId,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self { room_id }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self {
Self {}
}
} | } | random_line_split |
|
mod.rs | // SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn | (bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
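// Relocations are compared as sets: sorting by (address, kind) makes the
// `assert_eq!` in `encode_test` independent of emission order.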
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c != Ordering::Equal {
c
} else {
a.kind.cmp(&b.kind)
}
});
vec
}
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap();
decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr));
}
}
assert_eq!(constant_offsets, expected_constant_offsets);
}
| decode | identifier_name |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn decode(bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c != Ordering::Equal {
c
} else {
a.kind.cmp(&b.kind)
}
});
vec
}
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
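	// The RETURN_* flags ask the block encoder to hand back the metadata
	// (relocs, per-instruction offsets, constant offsets) verified below.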
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap(); | }
}
assert_eq!(constant_offsets, expected_constant_offsets);
} | decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr)); | random_line_split |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn decode(bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> |
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap();
decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr));
}
}
assert_eq!(constant_offsets, expected_constant_offsets);
}
| {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c != Ordering::Equal {
c
} else {
a.kind.cmp(&b.kind)
}
});
vec
} | identifier_body |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn decode(bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c != Ordering::Equal | else {
a.kind.cmp(&b.kind)
}
});
vec
}
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap();
decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr));
}
}
assert_eq!(constant_offsets, expected_constant_offsets);
}
| {
c
} | conditional_block |
subscription_manifest.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Andrew Kofink <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: subscription_manifest
version_added: 1.0.0
short_description: Manage Subscription Manifests
description:
- Upload, refresh and delete Subscription Manifests
author: "Andrew Kofink (@akofink)"
options:
manifest_path:
description:
- Path to the manifest zip file
- This parameter will be ignored if I(state=absent) or I(state=refreshed)
type: path
state:
description:
- The state of the manifest
default: present
choices:
- absent
- present
- refreshed
type: str
repository_url:
description:
- URL to retrieve content from
aliases: [ redhat_repository_url ]
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.organization
'''
EXAMPLES = '''
- name: "Upload the RHEL developer edition manifest"
theforeman.foreman.subscription_manifest:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
organization: "Default Organization"
state: present
manifest_path: "/tmp/manifest.zip"
'''
RETURN = ''' # '''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import KatelloEntityAnsibleModule
def | ():
module = KatelloEntityAnsibleModule(
argument_spec=dict(
manifest_path=dict(type='path'),
state=dict(default='present', choices=['absent', 'present', 'refreshed']),
repository_url=dict(aliases=['redhat_repository_url']),
),
foreman_spec=dict(
organization=dict(type='entity', required=True, thin=False),
),
required_if=[
['state', 'present', ['manifest_path']],
],
supports_check_mode=False,
)
module.task_timeout = 5 * 60
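    # The manifest import runs as a long-lived Katello task; give it up to
    # five minutes (the timeout is in seconds).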
with module.api_connection():
organization = module.lookup_entity('organization')
scope = module.scope_for('organization')
try:
existing_manifest = organization['owner_details']['upstreamConsumer']
except KeyError:
existing_manifest = None
if module.state == 'present':
if 'repository_url' in module.foreman_params:
payload = {'redhat_repository_url': module.foreman_params['repository_url']}
org_spec = dict(id=dict(), redhat_repository_url=dict())
organization = module.ensure_entity('organizations', payload, organization, state='present', foreman_spec=org_spec)
try:
with open(module.foreman_params['manifest_path'], 'rb') as manifest_file:
files = {'content': (module.foreman_params['manifest_path'], manifest_file, 'application/zip')}
params = {}
if 'repository_url' in module.foreman_params:
params['repository_url'] = module.foreman_params['repository_url']
params.update(scope)
result = module.resource_action('subscriptions', 'upload', params, files=files, record_change=False, ignore_task_errors=True)
for error in result['humanized']['errors']:
if "same as existing data" in error:
# Nothing changed, but everything ok
break
if "older than existing data" in error:
module.fail_json(msg="Manifest is older than existing data.")
else:
module.fail_json(msg="Upload of the manifest failed: %s" % error)
else:
module.set_changed()
except IOError as e:
module.fail_json(msg="Unable to read the manifest file: %s" % e)
elif module.desired_absent and existing_manifest:
module.resource_action('subscriptions', 'delete_manifest', scope)
elif module.state == 'refreshed':
if existing_manifest:
module.resource_action('subscriptions', 'refresh_manifest', scope)
else:
module.fail_json(msg="No manifest found to refresh.")
if __name__ == '__main__':
main()
| main | identifier_name |
subscription_manifest.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Andrew Kofink <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: subscription_manifest
version_added: 1.0.0
short_description: Manage Subscription Manifests
description:
- Upload, refresh and delete Subscription Manifests
author: "Andrew Kofink (@akofink)"
options:
manifest_path:
description:
- Path to the manifest zip file
- This parameter will be ignored if I(state=absent) or I(state=refreshed)
type: path
state:
description:
- The state of the manifest
default: present
choices:
- absent
- present
- refreshed
type: str
repository_url:
description:
- URL to retrieve content from
aliases: [ redhat_repository_url ]
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.organization
'''
EXAMPLES = '''
- name: "Upload the RHEL developer edition manifest"
theforeman.foreman.subscription_manifest:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
organization: "Default Organization"
state: present
manifest_path: "/tmp/manifest.zip"
'''
RETURN = ''' # '''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import KatelloEntityAnsibleModule
def main():
module = KatelloEntityAnsibleModule(
argument_spec=dict(
manifest_path=dict(type='path'),
state=dict(default='present', choices=['absent', 'present', 'refreshed']),
repository_url=dict(aliases=['redhat_repository_url']),
),
foreman_spec=dict(
organization=dict(type='entity', required=True, thin=False),
),
required_if=[
['state', 'present', ['manifest_path']],
],
supports_check_mode=False,
)
module.task_timeout = 5 * 60
with module.api_connection():
organization = module.lookup_entity('organization')
scope = module.scope_for('organization')
try:
existing_manifest = organization['owner_details']['upstreamConsumer']
except KeyError:
existing_manifest = None
if module.state == 'present':
if 'repository_url' in module.foreman_params:
payload = {'redhat_repository_url': module.foreman_params['repository_url']}
org_spec = dict(id=dict(), redhat_repository_url=dict())
organization = module.ensure_entity('organizations', payload, organization, state='present', foreman_spec=org_spec)
try:
with open(module.foreman_params['manifest_path'], 'rb') as manifest_file: | files = {'content': (module.foreman_params['manifest_path'], manifest_file, 'application/zip')}
params = {}
if 'repository_url' in module.foreman_params:
params['repository_url'] = module.foreman_params['repository_url']
params.update(scope)
result = module.resource_action('subscriptions', 'upload', params, files=files, record_change=False, ignore_task_errors=True)
for error in result['humanized']['errors']:
if "same as existing data" in error:
# Nothing changed, but everything ok
break
if "older than existing data" in error:
module.fail_json(msg="Manifest is older than existing data.")
else:
module.fail_json(msg="Upload of the manifest failed: %s" % error)
else:
module.set_changed()
except IOError as e:
module.fail_json(msg="Unable to read the manifest file: %s" % e)
elif module.desired_absent and existing_manifest:
module.resource_action('subscriptions', 'delete_manifest', scope)
elif module.state == 'refreshed':
if existing_manifest:
module.resource_action('subscriptions', 'refresh_manifest', scope)
else:
module.fail_json(msg="No manifest found to refresh.")
if __name__ == '__main__':
main() | random_line_split |
|
subscription_manifest.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Andrew Kofink <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: subscription_manifest
version_added: 1.0.0
short_description: Manage Subscription Manifests
description:
- Upload, refresh and delete Subscription Manifests
author: "Andrew Kofink (@akofink)"
options:
manifest_path:
description:
- Path to the manifest zip file
- This parameter will be ignored if I(state=absent) or I(state=refreshed)
type: path
state:
description:
- The state of the manifest
default: present
choices:
- absent
- present
- refreshed
type: str
repository_url:
description:
- URL to retrieve content from
aliases: [ redhat_repository_url ]
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.organization
'''
EXAMPLES = '''
- name: "Upload the RHEL developer edition manifest"
theforeman.foreman.subscription_manifest:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
organization: "Default Organization"
state: present
manifest_path: "/tmp/manifest.zip"
'''
RETURN = ''' # '''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import KatelloEntityAnsibleModule
def main():
module = KatelloEntityAnsibleModule(
argument_spec=dict(
manifest_path=dict(type='path'),
state=dict(default='present', choices=['absent', 'present', 'refreshed']),
repository_url=dict(aliases=['redhat_repository_url']),
),
foreman_spec=dict(
organization=dict(type='entity', required=True, thin=False),
),
required_if=[
['state', 'present', ['manifest_path']],
],
supports_check_mode=False,
)
module.task_timeout = 5 * 60
with module.api_connection():
organization = module.lookup_entity('organization')
scope = module.scope_for('organization')
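        # `scope` carries the organization context that the subscriptions
        # actions below are executed against.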
try:
existing_manifest = organization['owner_details']['upstreamConsumer']
except KeyError:
existing_manifest = None
if module.state == 'present':
if 'repository_url' in module.foreman_params:
payload = {'redhat_repository_url': module.foreman_params['repository_url']}
org_spec = dict(id=dict(), redhat_repository_url=dict())
organization = module.ensure_entity('organizations', payload, organization, state='present', foreman_spec=org_spec)
try:
with open(module.foreman_params['manifest_path'], 'rb') as manifest_file:
files = {'content': (module.foreman_params['manifest_path'], manifest_file, 'application/zip')}
params = {}
if 'repository_url' in module.foreman_params:
params['repository_url'] = module.foreman_params['repository_url']
params.update(scope)
result = module.resource_action('subscriptions', 'upload', params, files=files, record_change=False, ignore_task_errors=True)
for error in result['humanized']['errors']:
if "same as existing data" in error:
# Nothing changed, but everything ok
break
if "older than existing data" in error:
|
else:
module.fail_json(msg="Upload of the manifest failed: %s" % error)
else:
module.set_changed()
except IOError as e:
module.fail_json(msg="Unable to read the manifest file: %s" % e)
elif module.desired_absent and existing_manifest:
module.resource_action('subscriptions', 'delete_manifest', scope)
elif module.state == 'refreshed':
if existing_manifest:
module.resource_action('subscriptions', 'refresh_manifest', scope)
else:
module.fail_json(msg="No manifest found to refresh.")
if __name__ == '__main__':
main()
| module.fail_json(msg="Manifest is older than existing data.") | conditional_block |
subscription_manifest.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Andrew Kofink <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: subscription_manifest
version_added: 1.0.0
short_description: Manage Subscription Manifests
description:
- Upload, refresh and delete Subscription Manifests
author: "Andrew Kofink (@akofink)"
options:
manifest_path:
description:
- Path to the manifest zip file
- This parameter will be ignored if I(state=absent) or I(state=refreshed)
type: path
state:
description:
- The state of the manifest
default: present
choices:
- absent
- present
- refreshed
type: str
repository_url:
description:
- URL to retrieve content from
aliases: [ redhat_repository_url ]
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.organization
'''
EXAMPLES = '''
- name: "Upload the RHEL developer edition manifest"
theforeman.foreman.subscription_manifest:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
organization: "Default Organization"
state: present
manifest_path: "/tmp/manifest.zip"
'''
RETURN = ''' # '''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import KatelloEntityAnsibleModule
def main():
|
if __name__ == '__main__':
main()
| module = KatelloEntityAnsibleModule(
argument_spec=dict(
manifest_path=dict(type='path'),
state=dict(default='present', choices=['absent', 'present', 'refreshed']),
repository_url=dict(aliases=['redhat_repository_url']),
),
foreman_spec=dict(
organization=dict(type='entity', required=True, thin=False),
),
required_if=[
['state', 'present', ['manifest_path']],
],
supports_check_mode=False,
)
module.task_timeout = 5 * 60
with module.api_connection():
organization = module.lookup_entity('organization')
scope = module.scope_for('organization')
try:
existing_manifest = organization['owner_details']['upstreamConsumer']
except KeyError:
existing_manifest = None
if module.state == 'present':
if 'repository_url' in module.foreman_params:
payload = {'redhat_repository_url': module.foreman_params['repository_url']}
org_spec = dict(id=dict(), redhat_repository_url=dict())
organization = module.ensure_entity('organizations', payload, organization, state='present', foreman_spec=org_spec)
try:
with open(module.foreman_params['manifest_path'], 'rb') as manifest_file:
files = {'content': (module.foreman_params['manifest_path'], manifest_file, 'application/zip')}
params = {}
if 'repository_url' in module.foreman_params:
params['repository_url'] = module.foreman_params['repository_url']
params.update(scope)
result = module.resource_action('subscriptions', 'upload', params, files=files, record_change=False, ignore_task_errors=True)
for error in result['humanized']['errors']:
if "same as existing data" in error:
# Nothing changed, but everything ok
break
if "older than existing data" in error:
module.fail_json(msg="Manifest is older than existing data.")
else:
module.fail_json(msg="Upload of the manifest failed: %s" % error)
else:
module.set_changed()
except IOError as e:
module.fail_json(msg="Unable to read the manifest file: %s" % e)
elif module.desired_absent and existing_manifest:
module.resource_action('subscriptions', 'delete_manifest', scope)
elif module.state == 'refreshed':
if existing_manifest:
module.resource_action('subscriptions', 'refresh_manifest', scope)
else:
module.fail_json(msg="No manifest found to refresh.") | identifier_body |
scripts.js | exports.BattleScripts = {
init: function() {
for (var i in this.data.Pokedex) {
var template = this.getTemplate(i);
var newStats = {
hp: template.id === 'shedinja' ? 1 : this.clampIntRange(150 - template.baseStats.hp, 5, 145),
atk: this.clampIntRange(150 - template.baseStats.atk, 5, 145),
def: this.clampIntRange(150 - template.baseStats.def, 5, 145),
spa: this.clampIntRange(150 - template.baseStats.spa, 5, 145),
spd: this.clampIntRange(150 - template.baseStats.spd, 5, 145),
spe: this.clampIntRange(150 - template.baseStats.spe, 5, 145)
}
this.modData('Pokedex', i).baseStats = newStats; | }; | }
} | random_line_split |
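The mod in this row inverts every base stat as `150 - stat` and clamps the result to [5, 145] (with Shedinja's HP pinned at 1). A quick check of that clamp, with `clamp_int_range` as a hypothetical stand-in for the engine's `clampIntRange`:

```python
# Hypothetical stand-in for clampIntRange(value, min, max).
def clamp_int_range(value, lo, hi):
    return max(lo, min(hi, int(value)))

for stat in (255, 150, 100, 50, 5):
    print(stat, "->", clamp_int_range(150 - stat, 5, 145))
# 255 -> 5, 150 -> 5, 100 -> 50, 50 -> 100, 5 -> 145
```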
tail_boom_flex.py | " tail boom flexibility "
from numpy import pi
from gpkit import Model, parse_variables, SignomialsEnabled
class TailBoomFlexibility(Model):
""" Tail Boom Flexibility Model
Variables
---------
Fne [-] tail boom flexibility factor
deda [-] wing downwash derivative
SMcorr 0.55 [-] corrected static margin
sph1 [-] flexibility helper variable 1
sph2 [-] flexibility helper variable 2
LaTex Strings
-------------
Fne F_{\mathrm{NE}}
deda d\\epsilon/d\\alpha
SMcorr SM_{\\mathrm{corr}}
"""
@parse_variables(__doc__, globals())
def | (self, htail, hbending, wing):
mh = htail.mh
mw = wing.mw
Vh = htail.Vh
th = hbending.th
CLhmin = htail.CLhmin
CLwmax = wing.planform.CLmax
Sw = wing.planform.S
bw = wing.planform.b
lh = htail.lh
CM = wing.planform.CM
constraints = [
Fne >= 1 + mh*th,
sph1*(mw*Fne/mh/Vh) + deda <= 1,
sph2 <= Vh*CLhmin/CLwmax,
# (sph1 + sph2).mono_lower_bound({"sph1": .48, "sph2": .52}) >= (
# SMcorr + wing["C_M"]/wing["C_{L_{max}}"]),
deda >= mw*Sw/bw/4/pi/lh]
with SignomialsEnabled():
constraints.extend([sph1 + sph2 >= SMcorr + CM/CLwmax])
return constraints
| setup | identifier_name |
tail_boom_flex.py | " tail boom flexibility "
from numpy import pi
from gpkit import Model, parse_variables, SignomialsEnabled
class TailBoomFlexibility(Model):
""" Tail Boom Flexibility Model
Variables
---------
Fne [-] tail boom flexibility factor
deda [-] wing downwash derivative
SMcorr 0.55 [-] corrected static margin
sph1 [-] flexibility helper variable 1
sph2 [-] flexibility helper variable 2
LaTex Strings
-------------
Fne F_{\mathrm{NE}}
deda d\\epsilon/d\\alpha
SMcorr SM_{\\mathrm{corr}}
"""
@parse_variables(__doc__, globals())
def setup(self, htail, hbending, wing):
mh = htail.mh
mw = wing.mw
Vh = htail.Vh
th = hbending.th
CLhmin = htail.CLhmin
CLwmax = wing.planform.CLmax | CM = wing.planform.CM
constraints = [
Fne >= 1 + mh*th,
sph1*(mw*Fne/mh/Vh) + deda <= 1,
sph2 <= Vh*CLhmin/CLwmax,
# (sph1 + sph2).mono_lower_bound({"sph1": .48, "sph2": .52}) >= (
# SMcorr + wing["C_M"]/wing["C_{L_{max}}"]),
deda >= mw*Sw/bw/4/pi/lh]
with SignomialsEnabled():
constraints.extend([sph1 + sph2 >= SMcorr + CM/CLwmax])
return constraints | Sw = wing.planform.S
bw = wing.planform.b
lh = htail.lh | random_line_split |
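The tail-boom rows all build constraint lists for a gpkit `Model`. For readers new to the library, here is a toy geometric program in the same style; it assumes gpkit's documented `Variable`/`Model` API and is unrelated to the tail-boom model itself:

```python
# Minimal gpkit example: minimize x + y subject to x*y >= 4.
# At the optimum x = y = 2 and the cost is 4.
from gpkit import Variable, Model

x = Variable("x")
y = Variable("y")
m = Model(x + y, [x*y >= 4])
sol = m.solve(verbosity=0)
print(sol["cost"])  # ~4.0
```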
tail_boom_flex.py | " tail boom flexibility "
from numpy import pi
from gpkit import Model, parse_variables, SignomialsEnabled
class TailBoomFlexibility(Model):
""" Tail Boom Flexibility Model
Variables
---------
Fne [-] tail boom flexibility factor
deda [-] wing downwash derivative
SMcorr 0.55 [-] corrected static margin
sph1 [-] flexibility helper variable 1
sph2 [-] flexibility helper variable 2
LaTex Strings
-------------
Fne F_{\mathrm{NE}}
deda d\\epsilon/d\\alpha
SMcorr SM_{\\mathrm{corr}}
"""
@parse_variables(__doc__, globals())
def setup(self, htail, hbending, wing):
| mh = htail.mh
mw = wing.mw
Vh = htail.Vh
th = hbending.th
CLhmin = htail.CLhmin
CLwmax = wing.planform.CLmax
Sw = wing.planform.S
bw = wing.planform.b
lh = htail.lh
CM = wing.planform.CM
constraints = [
Fne >= 1 + mh*th,
sph1*(mw*Fne/mh/Vh) + deda <= 1,
sph2 <= Vh*CLhmin/CLwmax,
# (sph1 + sph2).mono_lower_bound({"sph1": .48, "sph2": .52}) >= (
# SMcorr + wing["C_M"]/wing["C_{L_{max}}"]),
deda >= mw*Sw/bw/4/pi/lh]
with SignomialsEnabled():
constraints.extend([sph1 + sph2 >= SMcorr + CM/CLwmax])
return constraints | identifier_body |
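The downwash bound `deda >= mw*Sw/bw/4/pi/lh` is plain arithmetic once numbers are plugged in. A back-of-envelope check with made-up inputs (placeholders, not values from any actual aircraft model):

```python
from math import pi

mw, Sw, bw, lh = 5.0, 0.5, 2.0, 1.5  # lift-curve slope, wing area, span, tail arm
deda_min = mw * Sw / bw / 4 / pi / lh
print(f"deda >= {deda_min:.4f}")      # ~0.0663
```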
|
test_link_shared_libraries.py | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants_test.backend.native.tasks.native_task_test_base import (
NativeCompileTestMixin,
NativeTaskTestBase,
)
class | (NativeTaskTestBase, NativeCompileTestMixin):
@classmethod
def task_type(cls):
return LinkSharedLibraries
def test_caching(self):
cpp = self.create_simple_cpp_library(ctypes_native_library=NativeArtifact(lib_name="test"))
cpp_compile_task_type = self.synthesize_task_subtype(CppCompile, "cpp_compile_scope")
context = self.prepare_context_for_compile(
target_roots=[cpp],
for_task_types=[cpp_compile_task_type],
options={"libc": {"enable_libc_search": True}},
)
cpp_compile = cpp_compile_task_type(
context, os.path.join(self.pants_workdir, "cpp_compile")
)
cpp_compile.execute()
link_shared_libraries = self.create_task(context)
link_shared_libraries.execute()
link_shared_libraries.execute()
| LinkSharedLibrariesTest | identifier_name |
test_link_shared_libraries.py | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants_test.backend.native.tasks.native_task_test_base import (
NativeCompileTestMixin,
NativeTaskTestBase,
)
class LinkSharedLibrariesTest(NativeTaskTestBase, NativeCompileTestMixin):
@classmethod
def task_type(cls):
return LinkSharedLibraries
def test_caching(self):
| cpp = self.create_simple_cpp_library(ctypes_native_library=NativeArtifact(lib_name="test"))
cpp_compile_task_type = self.synthesize_task_subtype(CppCompile, "cpp_compile_scope")
context = self.prepare_context_for_compile(
target_roots=[cpp],
for_task_types=[cpp_compile_task_type],
options={"libc": {"enable_libc_search": True}},
)
cpp_compile = cpp_compile_task_type(
context, os.path.join(self.pants_workdir, "cpp_compile")
)
cpp_compile.execute()
link_shared_libraries = self.create_task(context)
link_shared_libraries.execute()
link_shared_libraries.execute() | identifier_body |
|
test_link_shared_libraries.py | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants_test.backend.native.tasks.native_task_test_base import (
NativeCompileTestMixin,
NativeTaskTestBase,
)
class LinkSharedLibrariesTest(NativeTaskTestBase, NativeCompileTestMixin):
@classmethod
def task_type(cls):
return LinkSharedLibraries
def test_caching(self):
cpp = self.create_simple_cpp_library(ctypes_native_library=NativeArtifact(lib_name="test"))
cpp_compile_task_type = self.synthesize_task_subtype(CppCompile, "cpp_compile_scope") | context = self.prepare_context_for_compile(
target_roots=[cpp],
for_task_types=[cpp_compile_task_type],
options={"libc": {"enable_libc_search": True}},
)
cpp_compile = cpp_compile_task_type(
context, os.path.join(self.pants_workdir, "cpp_compile")
)
cpp_compile.execute()
link_shared_libraries = self.create_task(context)
link_shared_libraries.execute()
link_shared_libraries.execute() | random_line_split |
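Calling `execute()` twice back-to-back is the standard cache-hit check: the first run does the work, the second should be served from cache. The same idea in miniature, independent of pants internals (all names here are illustrative):

```python
from functools import lru_cache

runs = []

@lru_cache(maxsize=None)
def link(lib_name):
    runs.append(lib_name)  # records each time real linking work happens
    return lib_name + ".so"

link("test")
link("test")
assert runs == ["test"], "second call should be a cache hit"
```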
|
ProcessDetector.test.ts | /*
* Copyright The OpenTelemetry Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as sinon from 'sinon';
import { processDetector, Resource } from '../../../src';
import {
assertEmptyResource,
} from '../../util/resource-assertions';
import { describeBrowser } from '../../util';
describeBrowser('processDetector() on web browser', () => { | sinon.restore();
});
it('should return empty resource', async () => {
const resource: Resource = await processDetector.detect();
assertEmptyResource(resource);
});
}); | afterEach(() => { | random_line_split |
AntlrParser.js | "use strict";
var antlr4 = require('antlr4/index');
var LambdaCalculusLexer = require("../antlr/generated/LambdaCalculusLexer");
var LambdaCalculusParser = require("../antlr/generated/LambdaCalculusParser");
var AstCreator = require("./ParseTreeListeningAstCreator").AstCreator;
var Immutable = require('immutable');
module.exports.AntlrParser = function () {
var self = {};
self.parse = function (input) {
var chars = new antlr4.InputStream(input);
var lexer = new LambdaCalculusLexer.LambdaCalculusLexer(chars);
var tokens = new antlr4.CommonTokenStream(lexer);
var parser = new LambdaCalculusParser.LambdaCalculusParser(tokens);
parser.buildParseTrees = true;
var tree = parser.expression();
var astCreator = new AstCreator();
var result = tree.accept(astCreator);
// todo - implement error handling
return Immutable.fromJS({
value: result,
status: true
});
};
return self; | }(); | random_line_split |
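This row's parse pipeline is the canonical ANTLR chain: character stream, lexer, token stream, parser, parse tree, visitor. The same shape in Python (antlr4-python3-runtime), with the grammar-specific generated classes passed in rather than imported:

```python
# Sketch only: lexer_cls/parser_cls would be ANTLR-generated classes,
# and 'expression' is assumed to be the grammar's entry rule.
from antlr4 import CommonTokenStream, InputStream

def parse_expression(text, lexer_cls, parser_cls):
    chars = InputStream(text)
    tokens = CommonTokenStream(lexer_cls(chars))
    parser = parser_cls(tokens)
    return parser.expression()
```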
|
tileinfo.py | #!/usr/bin/env python
"""Print out a report about whats in a vectortile
Usage:
tileinfo.py [options] [SOURCE]
Options:
--srcformat=SRC_FORMAT Source file format: (tile | json)
--indent=INT|None JSON indentation level. Defaults to 4. Use 'None' to disable.
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
"""
import json
import sys
from docopt import docopt
from vectortile import Tile
def info(data, cols):
|
def main():
"""
Get an info report for a tile. Format is same as input tile but with
min/max values for values under 'data'.
"""
arguments = docopt(__doc__, version='tileinfo 0.1')
src_name = arguments['SOURCE']
src_format = arguments['--srcformat']
indent = arguments['--indent']
if isinstance(indent, str) and indent.lower() == 'none':
indent = None
elif isinstance(indent, str):
indent = int(indent)
else:
indent = 4
with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:
# Guess input format if not given
if src_format is None:
if '.json' == f.name[-5:]:
src_format = 'json'
else:
src_format = 'tile'
if src_format == 'tile':
header, data = Tile(f.read()).unpack()
else:
header = json.loads(f.read())
data = header.pop('data')
# Generate the info report
report = info(data, header['cols'])
# Merge report with other tile attributes
out = {k: v for k, v in header.items() if k != 'data'}
out['data'] = {}
for field, vals in report.items():
out['data'][field + '_min'] = vals['min']
out['data'][field + '_max'] = vals['max']
print(json.dumps(out, indent=indent, sort_keys=True))
if __name__ == '__main__':
sys.exit(main())
| """
Compute min/max for all registered columns.
Parameters
----------
data : list
List of points from tile.
cols : list
List of columns from tile header.
Returns
-------
dict
{
column: {
min: value,
max: value
}
}
"""
stats = {c['name']: [] for c in cols}
for point in data:
for c, v in point.items():
stats[c].append(v)
return {n: {'min': min(v), 'max': max(v)} for n, v in stats.items()} | identifier_body |
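A toy invocation of `info()` as reconstructed above, with `cols` in the same `{'name': ...}` shape the tile header uses:

```python
sample_cols = [{'name': 'lat'}, {'name': 'lon'}]
sample_data = [{'lat': 1.0, 'lon': -3.0}, {'lat': 2.5, 'lon': -7.5}]
print(info(sample_data, sample_cols))
# {'lat': {'min': 1.0, 'max': 2.5}, 'lon': {'min': -7.5, 'max': -3.0}}
```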
tileinfo.py | #!/usr/bin/env python
"""Print out a report about whats in a vectortile
Usage:
tileinfo.py [options] [SOURCE]
Options:
--srcformat=SRC_FORMAT Source file format: (tile | json)
--indent=INT|None JSON indentation level. Defaults to 4. Use 'None' to disable.
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
"""
import json
import sys
from docopt import docopt
from vectortile import Tile
def info(data, cols): | Compute min/max for all registered columns.
Parameters
----------
data : list
List of points from tile.
cols : list
List of columns from tile header.
Returns
-------
dict
{
column: {
min: value,
max: value
}
}
"""
stats = {c['name']: [] for c in cols}
for point in data:
for c, v in point.items():
stats[c].append(v)
return {n: {'min': min(v), 'max': max(v)} for n, v in stats.items()}
def main():
"""
Get an info report for a tile. Format is same as input tile but with
min/max values for values under 'data'.
"""
arguments = docopt(__doc__, version='tileinfo 0.1')
src_name = arguments['SOURCE']
src_format = arguments['--srcformat']
indent = arguments['--indent']
if isinstance(indent, str) and indent.lower() == 'none':
indent = None
elif isinstance(indent, str):
indent = int(indent)
else:
indent = 4
with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:
# Guess input format if not given
if src_format is None:
if '.json' == f.name[-5:]:
src_format = 'json'
else:
src_format = 'tile'
if src_format == 'tile':
header, data = Tile(f.read()).unpack()
else:
header = json.loads(f.read())
data = header.pop('data')
# Generate the info report
report = info(data, header['cols'])
# Merge report with other tile attributes
out = {k: v for k, v in header.items() if k != 'data'}
out['data'] = {}
for field, vals in report.items():
out['data'][field + '_min'] = vals['min']
out['data'][field + '_max'] = vals['max']
print(json.dumps(out, indent=indent, sort_keys=True))
if __name__ == '__main__':
sys.exit(main()) |
""" | random_line_split |
tileinfo.py | #!/usr/bin/env python
"""Print out a report about whats in a vectortile
Usage:
tileinfo.py [options] [SOURCE]
Options:
--srcformat=SRC_FORMAT Source file format: (tile | json)
--indent=INT|None JSON indentation level. Defaults to 4. Use 'None' to disable.
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
"""
import json
import sys
from docopt import docopt
from vectortile import Tile
def info(data, cols):
"""
Compute min/max for all registered columns.
Parameters
----------
data : list
List of points from tile.
cols : list
List of columns from tile header.
Returns
-------
dict
{
column: {
min: value,
max: value
}
}
"""
stats = {c['name']: [] for c in cols}
for point in data:
|
return {n: {'min': min(v), 'max': max(v)} for n, v in stats.items()}
def main():
"""
Get an info report for a tile. Format is same as input tile but with
min/max values for values under 'data'.
"""
arguments = docopt(__doc__, version='tileinfo 0.1')
src_name = arguments['SOURCE']
src_format = arguments['--srcformat']
indent = arguments['--indent']
if isinstance(indent, str) and indent.lower() == 'none':
indent = None
elif isinstance(indent, str):
indent = int(indent)
else:
indent = 4
with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:
# Guess input format if not given
if src_format is None:
if '.json' == f.name[-5:]:
src_format = 'json'
else:
src_format = 'tile'
if src_format == 'tile':
header, data = Tile(f.read()).unpack()
else:
header = json.loads(f.read())
data = header.pop('data')
# Generate the info report
report = info(data, header['cols'])
# Merge report with other tile attributes
out = {k: v for k, v in header.items() if k != 'data'}
out['data'] = {}
for field, vals in report.items():
out['data'][field + '_min'] = vals['min']
out['data'][field + '_max'] = vals['max']
print(json.dumps(out, indent=indent, sort_keys=True))
if __name__ == '__main__':
sys.exit(main())
| for c, v in point.items():
stats[c].append(v) | conditional_block |
tileinfo.py | #!/usr/bin/env python
"""Print out a report about whats in a vectortile
Usage:
tileinfo.py [options] [SOURCE]
Options:
--srcformat=SRC_FORMAT Source file format: (tile | json)
--indent=INT|None JSON indentation level. Defaults to 4. Use 'None' to disable.
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
"""
import json
import sys
from docopt import docopt
from vectortile import Tile
def info(data, cols):
"""
Compute min/max for all registered columns.
Parameters
----------
data : list
List of points from tile.
cols : list
List of columns from tile header.
Returns
-------
dict
{
column: {
min: value,
max: value
}
}
"""
stats = {c['name']: [] for c in cols}
for point in data:
for c, v in point.items():
stats[c].append(v)
return {n: {'min': min(v), 'max': max(v)} for n, v in stats.items()}
def | ():
"""
Get an info report for a tile. Format is same as input tile but with
min/max values for values under 'data'.
"""
arguments = docopt(__doc__, version='tileinfo 0.1')
src_name = arguments['SOURCE']
src_format = arguments['--srcformat']
indent = arguments['--indent']
if isinstance(indent, str) and indent.lower() == 'none':
indent = None
elif isinstance(indent, str):
indent = int(indent)
else:
indent = 4
with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:
# Guess input format if not given
if src_format is None:
if '.json' == f.name[-5:]:
src_format = 'json'
else:
src_format = 'tile'
if src_format == 'tile':
header, data = Tile(f.read()).unpack()
else:
header = json.loads(f.read())
data = header.pop('data')
# Generate the info report
report = info(data, header['cols'])
# Merge report with other tile attributes
out = {k: v for k, v in header.items() if k != 'data'}
out['data'] = {}
for field, vals in report.items():
out['data'][field + '_min'] = vals['min']
out['data'][field + '_max'] = vals['max']
print(json.dumps(out, indent=indent, sort_keys=True))
if __name__ == '__main__':
sys.exit(main())
| main | identifier_name |
urls.py | # Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from a10_horizon.dashboard.a10networks.a10appliances import views
urlpatterns = patterns(
'a10_horizon.dashboard.a10networks.a10appliances.views',
url(r'^$', views.IndexView.as_view(), name='index') | ) | # url(r'^deleteappliance$', views.DeleteApplianceView.as_view(), name='deleteappliance')
# url(r'^addimage$', views.AddImageView.as_view(), name="addimage") | random_line_split |
gru_cell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.rnn_cells.base_cell import BaseCell
from lib import linalg
#***************************************************************
class GRUCell(BaseCell): | def __call__(self, inputs, state, scope=None):
""""""
with tf.variable_scope(scope or type(self).__name__):
cell_tm1, hidden_tm1 = tf.split(axis=1, num_or_size_splits=2, value=state)
with tf.variable_scope('Gates'):
linear = linalg.linear([inputs, hidden_tm1],
self.output_size,
add_bias=True,
n_splits=2,
moving_params=self.moving_params)
update_act, reset_act = linear
update_gate = linalg.sigmoid(update_act-self.forget_bias)
reset_gate = linalg.sigmoid(reset_act)
reset_state = reset_gate * hidden_tm1
with tf.variable_scope('Candidate'):
hidden_act = linalg.linear([inputs, reset_state],
self.output_size,
add_bias=True,
moving_params=self.moving_params)
hidden_tilde = self.recur_func(hidden_act)
cell_t = update_gate * cell_tm1 + (1-update_gate) * hidden_tilde
return cell_t, tf.concat(axis=1, values=[cell_t, cell_t])
#=============================================================
@property
def state_size(self):
return self.output_size * 2 | """"""
#============================================================= | random_line_split |
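Stripped of the TensorFlow scaffolding, the cell above computes the standard GRU update. A NumPy sketch of one step, with random weights as stand-ins for learned parameters (the code's `forget_bias` shift and duplicated cell/hidden state are omitted):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, Wz, Wr, Wh):
    xh = np.concatenate([x, h])
    z = sigmoid(Wz @ xh)                                 # update gate
    r = sigmoid(Wr @ xh)                                 # reset gate
    h_tilde = np.tanh(Wh @ np.concatenate([x, r * h]))   # candidate state
    return z * h + (1.0 - z) * h_tilde                   # gated blend, as above

rng = np.random.default_rng(0)
d_in, d_h = 3, 4
x, h = rng.normal(size=d_in), np.zeros(d_h)
Wz, Wr, Wh = (rng.normal(size=(d_h, d_in + d_h)) for _ in range(3))
print(gru_step(x, h, Wz, Wr, Wh))
```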
gru_cell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.rnn_cells.base_cell import BaseCell
from lib import linalg
#***************************************************************
class GRUCell(BaseCell):
""""""
#=============================================================
def __call__(self, inputs, state, scope=None):
""""""
with tf.variable_scope(scope or type(self).__name__):
cell_tm1, hidden_tm1 = tf.split(axis=1, num_or_size_splits=2, value=state)
with tf.variable_scope('Gates'):
linear = linalg.linear([inputs, hidden_tm1],
self.output_size,
add_bias=True,
n_splits=2,
moving_params=self.moving_params)
update_act, reset_act = linear
update_gate = linalg.sigmoid(update_act-self.forget_bias)
reset_gate = linalg.sigmoid(reset_act)
reset_state = reset_gate * hidden_tm1
with tf.variable_scope('Candidate'):
hidden_act = linalg.linear([inputs, reset_state],
self.output_size,
add_bias=True,
moving_params=self.moving_params)
hidden_tilde = self.recur_func(hidden_act)
cell_t = update_gate * cell_tm1 + (1-update_gate) * hidden_tilde
return cell_t, tf.concat(axis=1, values=[cell_t, cell_t])
#=============================================================
@property
def state_size(self):
| return self.output_size * 2 | identifier_body |