file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
acceptancetest.py | import hashlib
import shutil
import os
from datetime import datetime
list_of_paths_and_strings = [
["assignment1.cpp", "main()"]
]
def main():
if acceptance_test():
make_txt_file()
zip_dir()
def get_md5_hash(file):
# opening file
file_to_hash = open(file)
read_file = file_to_hash.read()
# get hash of file
md5_hash = hashlib.md5(read_file)
md5_hash_output = md5_hash.hexdigest()
# print file name and hash
print "File Name: %s" % file
print "MD5 Hash: %r" % md5_hash_output
# return hash
return file, md5_hash_output
def get_current_time():
print "The current time is " + " datetime.today()"
return datetime.today()
def acceptance_test():
# for each list of the list of paths and strings
# make sure that a file with that name exists within the folder
for my_list in list_of_paths_and_strings:
path = my_list[0]
list_of_strings = my_list[1:]
try:
with open(path) as file:
for string in list_of_strings:
if string in file.read():
print "Found " + string + " in file."
else:
print string + "not found in file."
return False
file.close()
return True
except:
print 'File does not exist. Please make sure all necessary files are in the correct place.'
return False
def | ():
# writes a text file with each of the hashes for each of the files using MD5
write_file = open("hash.txt", "w+")
write_file.write("Write time: " + str(get_current_time()) + '\n')
for file in os.listdir(os.getcwd()):
if "." in file:
f_name, file_hash = get_md5_hash(file)
write_file.write(f_name + '\n')
write_file.write(file_hash + '\n')
write_file.close()
def zip_dir():
# zips directory using shutil.make_archive()
zip_name = "submission"
directory_name = "./tmp"
os.mkdir("./tmp")
for file in os.listdir(os.getcwd()):
try:
if ".pdf" in file:
continue
elif "acceptancetest" in file:
continue
else:
shutil.copy(file, './tmp/')
except:
continue
shutil.make_archive(zip_name, 'zip', directory_name)
shutil.rmtree('./tmp')
if __name__ == '__main__':
main() | make_txt_file | identifier_name |
acceptancetest.py | import hashlib
import shutil
import os
from datetime import datetime
list_of_paths_and_strings = [
["assignment1.cpp", "main()"]
]
def main():
if acceptance_test():
make_txt_file()
zip_dir()
def get_md5_hash(file):
# opening file
|
def get_current_time():
print "The current time is " + " datetime.today()"
return datetime.today()
def acceptance_test():
# for each list of the list of paths and strings
# make sure that a file with that name exists within the folder
for my_list in list_of_paths_and_strings:
path = my_list[0]
list_of_strings = my_list[1:]
try:
with open(path) as file:
for string in list_of_strings:
if string in file.read():
print "Found " + string + " in file."
else:
print string + "not found in file."
return False
file.close()
return True
except:
print 'File does not exist. Please make sure all necessary files are in the correct place.'
return False
def make_txt_file():
# writes a text file with each of the hashes for each of the files using MD5
write_file = open("hash.txt", "w+")
write_file.write("Write time: " + str(get_current_time()) + '\n')
for file in os.listdir(os.getcwd()):
if "." in file:
f_name, file_hash = get_md5_hash(file)
write_file.write(f_name + '\n')
write_file.write(file_hash + '\n')
write_file.close()
def zip_dir():
# zips directory using shutil.make_archive()
zip_name = "submission"
directory_name = "./tmp"
os.mkdir("./tmp")
for file in os.listdir(os.getcwd()):
try:
if ".pdf" in file:
continue
elif "acceptancetest" in file:
continue
else:
shutil.copy(file, './tmp/')
except:
continue
shutil.make_archive(zip_name, 'zip', directory_name)
shutil.rmtree('./tmp')
if __name__ == '__main__':
main() | file_to_hash = open(file)
read_file = file_to_hash.read()
# get hash of file
md5_hash = hashlib.md5(read_file)
md5_hash_output = md5_hash.hexdigest()
# print file name and hash
print "File Name: %s" % file
print "MD5 Hash: %r" % md5_hash_output
# return hash
return file, md5_hash_output | identifier_body |
queue.js | "use strict";
var ASSERT = require("./assert");
function arrayMove(src, srcIndex, dst, dstIndex, len) {
for (var j = 0; j < len; ++j) {
dst[j + dstIndex] = src[j + srcIndex];
src[j + srcIndex] = void 0;
}
}
function Queue(capacity) |
Queue.prototype._willBeOverCapacity = function (size) {
return this._capacity < size;
};
Queue.prototype._pushOne = function (arg) {
var length = this.length();
this._checkCapacity(length + 1);
var i = (this._front + length) & (this._capacity - 1);
this[i] = arg;
this._length = length + 1;
};
Queue.prototype._unshiftOne = function(value) {
var capacity = this._capacity;
this._checkCapacity(this.length() + 1);
var front = this._front;
var i = (((( front - 1 ) &
( capacity - 1) ) ^ capacity ) - capacity );
this[i] = value;
this._front = i;
this._length = this.length() + 1;
};
Queue.prototype.unshift = function(fn, receiver, arg) {
this._unshiftOne(arg);
this._unshiftOne(receiver);
this._unshiftOne(fn);
};
Queue.prototype.push = function (fn, receiver, arg) {
ASSERT(arguments.length === 3);
ASSERT(typeof fn === "function");
var length = this.length() + 3;
if (this._willBeOverCapacity(length)) {
//The fast array copies expect the
//underlying array to be filled completely
this._pushOne(fn);
this._pushOne(receiver);
this._pushOne(arg);
return;
}
var j = this._front + length - 3;
this._checkCapacity(length);
var wrapMask = this._capacity - 1;
this[(j + 0) & wrapMask] = fn;
this[(j + 1) & wrapMask] = receiver;
this[(j + 2) & wrapMask] = arg;
this._length = length;
};
Queue.prototype.shift = function () {
ASSERT(this.length() > 0);
var front = this._front,
ret = this[front];
this[front] = undefined;
this._front = (front + 1) & (this._capacity - 1);
this._length--;
return ret;
};
Queue.prototype.length = function () {
return this._length;
};
Queue.prototype._checkCapacity = function (size) {
if (this._capacity < size) {
this._resizeTo(this._capacity << 1);
}
};
Queue.prototype._resizeTo = function (capacity) {
var oldCapacity = this._capacity;
this._capacity = capacity;
var front = this._front;
var length = this._length;
var moveItemsCount = (front + length) & (oldCapacity - 1);
arrayMove(this, 0, this, oldCapacity, moveItemsCount);
};
module.exports = Queue;
| {
this._capacity = capacity;
this._length = 0;
this._front = 0;
} | identifier_body |
queue.js | "use strict";
var ASSERT = require("./assert");
function arrayMove(src, srcIndex, dst, dstIndex, len) {
for (var j = 0; j < len; ++j) {
dst[j + dstIndex] = src[j + srcIndex];
src[j + srcIndex] = void 0;
}
}
function Queue(capacity) {
this._capacity = capacity;
this._length = 0;
this._front = 0;
}
Queue.prototype._willBeOverCapacity = function (size) {
return this._capacity < size;
};
Queue.prototype._pushOne = function (arg) {
var length = this.length();
this._checkCapacity(length + 1);
var i = (this._front + length) & (this._capacity - 1);
this[i] = arg;
this._length = length + 1;
};
Queue.prototype._unshiftOne = function(value) {
var capacity = this._capacity;
this._checkCapacity(this.length() + 1);
var front = this._front;
var i = (((( front - 1 ) &
( capacity - 1) ) ^ capacity ) - capacity );
this[i] = value;
this._front = i;
this._length = this.length() + 1;
};
Queue.prototype.unshift = function(fn, receiver, arg) {
this._unshiftOne(arg);
this._unshiftOne(receiver);
this._unshiftOne(fn);
};
Queue.prototype.push = function (fn, receiver, arg) {
ASSERT(arguments.length === 3);
ASSERT(typeof fn === "function");
var length = this.length() + 3;
if (this._willBeOverCapacity(length)) {
//The fast array copies expect the
//underlying array to be filled completely
this._pushOne(fn);
this._pushOne(receiver);
this._pushOne(arg);
return;
}
var j = this._front + length - 3;
this._checkCapacity(length);
var wrapMask = this._capacity - 1;
this[(j + 0) & wrapMask] = fn;
this[(j + 1) & wrapMask] = receiver;
this[(j + 2) & wrapMask] = arg;
this._length = length;
};
Queue.prototype.shift = function () {
ASSERT(this.length() > 0);
var front = this._front,
ret = this[front];
this[front] = undefined;
this._front = (front + 1) & (this._capacity - 1);
this._length--;
return ret;
};
Queue.prototype.length = function () {
return this._length;
};
Queue.prototype._checkCapacity = function (size) {
if (this._capacity < size) |
};
Queue.prototype._resizeTo = function (capacity) {
var oldCapacity = this._capacity;
this._capacity = capacity;
var front = this._front;
var length = this._length;
var moveItemsCount = (front + length) & (oldCapacity - 1);
arrayMove(this, 0, this, oldCapacity, moveItemsCount);
};
module.exports = Queue;
| {
this._resizeTo(this._capacity << 1);
} | conditional_block |
queue.js | "use strict";
var ASSERT = require("./assert");
function arrayMove(src, srcIndex, dst, dstIndex, len) {
for (var j = 0; j < len; ++j) {
dst[j + dstIndex] = src[j + srcIndex];
src[j + srcIndex] = void 0;
}
}
function | (capacity) {
this._capacity = capacity;
this._length = 0;
this._front = 0;
}
Queue.prototype._willBeOverCapacity = function (size) {
return this._capacity < size;
};
Queue.prototype._pushOne = function (arg) {
var length = this.length();
this._checkCapacity(length + 1);
var i = (this._front + length) & (this._capacity - 1);
this[i] = arg;
this._length = length + 1;
};
Queue.prototype._unshiftOne = function(value) {
var capacity = this._capacity;
this._checkCapacity(this.length() + 1);
var front = this._front;
var i = (((( front - 1 ) &
( capacity - 1) ) ^ capacity ) - capacity );
this[i] = value;
this._front = i;
this._length = this.length() + 1;
};
Queue.prototype.unshift = function(fn, receiver, arg) {
this._unshiftOne(arg);
this._unshiftOne(receiver);
this._unshiftOne(fn);
};
Queue.prototype.push = function (fn, receiver, arg) {
ASSERT(arguments.length === 3);
ASSERT(typeof fn === "function");
var length = this.length() + 3;
if (this._willBeOverCapacity(length)) {
//The fast array copies expect the
//underlying array to be filled completely
this._pushOne(fn);
this._pushOne(receiver);
this._pushOne(arg);
return;
}
var j = this._front + length - 3;
this._checkCapacity(length);
var wrapMask = this._capacity - 1;
this[(j + 0) & wrapMask] = fn;
this[(j + 1) & wrapMask] = receiver;
this[(j + 2) & wrapMask] = arg;
this._length = length;
};
Queue.prototype.shift = function () {
ASSERT(this.length() > 0);
var front = this._front,
ret = this[front];
this[front] = undefined;
this._front = (front + 1) & (this._capacity - 1);
this._length--;
return ret;
};
Queue.prototype.length = function () {
return this._length;
};
Queue.prototype._checkCapacity = function (size) {
if (this._capacity < size) {
this._resizeTo(this._capacity << 1);
}
};
Queue.prototype._resizeTo = function (capacity) {
var oldCapacity = this._capacity;
this._capacity = capacity;
var front = this._front;
var length = this._length;
var moveItemsCount = (front + length) & (oldCapacity - 1);
arrayMove(this, 0, this, oldCapacity, moveItemsCount);
};
module.exports = Queue;
| Queue | identifier_name |
queue.js | "use strict";
var ASSERT = require("./assert");
function arrayMove(src, srcIndex, dst, dstIndex, len) {
for (var j = 0; j < len; ++j) {
dst[j + dstIndex] = src[j + srcIndex];
src[j + srcIndex] = void 0;
}
}
function Queue(capacity) {
this._capacity = capacity;
this._length = 0;
this._front = 0;
}
Queue.prototype._willBeOverCapacity = function (size) {
return this._capacity < size;
};
Queue.prototype._pushOne = function (arg) {
var length = this.length();
this._checkCapacity(length + 1);
var i = (this._front + length) & (this._capacity - 1);
this[i] = arg;
this._length = length + 1;
};
Queue.prototype._unshiftOne = function(value) {
var capacity = this._capacity;
this._checkCapacity(this.length() + 1);
var front = this._front;
var i = (((( front - 1 ) &
( capacity - 1) ) ^ capacity ) - capacity );
this[i] = value;
this._front = i;
this._length = this.length() + 1;
};
Queue.prototype.unshift = function(fn, receiver, arg) {
this._unshiftOne(arg);
this._unshiftOne(receiver);
this._unshiftOne(fn);
};
Queue.prototype.push = function (fn, receiver, arg) {
ASSERT(arguments.length === 3);
ASSERT(typeof fn === "function");
var length = this.length() + 3;
if (this._willBeOverCapacity(length)) {
//The fast array copies expect the
//underlying array to be filled completely
this._pushOne(fn);
this._pushOne(receiver);
this._pushOne(arg);
return;
}
var j = this._front + length - 3;
this._checkCapacity(length);
var wrapMask = this._capacity - 1;
this[(j + 0) & wrapMask] = fn;
this[(j + 1) & wrapMask] = receiver;
this[(j + 2) & wrapMask] = arg;
this._length = length;
};
Queue.prototype.shift = function () {
ASSERT(this.length() > 0);
var front = this._front,
ret = this[front];
this[front] = undefined;
this._front = (front + 1) & (this._capacity - 1);
this._length--;
return ret;
}; | return this._length;
};
Queue.prototype._checkCapacity = function (size) {
if (this._capacity < size) {
this._resizeTo(this._capacity << 1);
}
};
Queue.prototype._resizeTo = function (capacity) {
var oldCapacity = this._capacity;
this._capacity = capacity;
var front = this._front;
var length = this._length;
var moveItemsCount = (front + length) & (oldCapacity - 1);
arrayMove(this, 0, this, oldCapacity, moveItemsCount);
};
module.exports = Queue; |
Queue.prototype.length = function () { | random_line_split |
keytar.test.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as assert from 'assert';
import * as platform from 'vs/base/common/platform';
suite('Keytar', () => {
test('loads and is functional', function (done) {
if (platform.isLinux) |
(async () => {
const keytar = await import('keytar');
const name = `VSCode Test ${Math.floor(Math.random() * 1e9)}`;
try {
await keytar.setPassword(name, 'foo', 'bar');
assert.equal(await keytar.getPassword(name, 'foo'), 'bar');
await keytar.deletePassword(name, 'foo');
assert.equal(await keytar.getPassword(name, 'foo'), undefined);
} catch (err) {
// try to clean up
try {
await keytar.deletePassword(name, 'foo');
} finally {
// tslint:disable-next-line: no-unsafe-finally
throw err;
}
}
})().then(done, done);
});
}); | {
// Skip test due to set up issue with Travis.
this.skip();
return;
} | conditional_block |
keytar.test.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as assert from 'assert';
import * as platform from 'vs/base/common/platform';
| if (platform.isLinux) {
// Skip test due to set up issue with Travis.
this.skip();
return;
}
(async () => {
const keytar = await import('keytar');
const name = `VSCode Test ${Math.floor(Math.random() * 1e9)}`;
try {
await keytar.setPassword(name, 'foo', 'bar');
assert.equal(await keytar.getPassword(name, 'foo'), 'bar');
await keytar.deletePassword(name, 'foo');
assert.equal(await keytar.getPassword(name, 'foo'), undefined);
} catch (err) {
// try to clean up
try {
await keytar.deletePassword(name, 'foo');
} finally {
// tslint:disable-next-line: no-unsafe-finally
throw err;
}
}
})().then(done, done);
});
}); | suite('Keytar', () => {
test('loads and is functional', function (done) { | random_line_split |
TableDialog.js.uncompressed.js | define("dojox/editor/plugins/nls/mk/TableDialog", {
//begin v1.x content
insertTableTitle: "Вметни табела",
modifyTableTitle: "Модифицирај табела",
rows: "Редови",
columns: "Колони",
align: "Порамни:",
cellPadding: "Дополнување на ќелија:",
cellSpacing: "Растојание меѓу ќелии:",
tableWidth: "Ширина на табела:",
backgroundColor: "Боја на заднина:",
borderColor: "Боја на раб:",
borderThickness: "Дебелина на раб:",
percent: "процент",
pixels: "пиксели",
"default": "стандардно",
left: "лево",
center: "центар",
right: "десно",
buttonSet: "Постави", // translated elsewhere?
buttonInsert: "Вметни",
buttonCancel: "Откажи",
selectTableLabel: "Избери табела",
insertTableRowBeforeLabel: "Додај ред пред",
insertTableRowAfterLabel: "Додај ред после",
insertTableColumnBeforeLabel: "Додај колона пред", | deleteTableColumnLabel: "Избриши колона",
colorTableCellTitle: "Боја на заднина на ќелија на табела",
tableContextMenuTitle: "Контекстуално мени на табела"
//end v1.x content
}); | insertTableColumnAfterLabel: "Додај колона после",
deleteTableRowLabel: "Избриши ред", | random_line_split |
defaults-fa_IR.min.js | /*! | *
* Copyright 2013-2016 bootstrap-select
* Licensed under MIT (https://github.com/silviomoreto/bootstrap-select/blob/master/LICENSE)
*/
!function(a,b){"function"==typeof define&&define.amd?define(["jquery"],function(a){return b(a)}):"object"==typeof exports?module.exports=b(require("jquery")):b(jQuery)}(this,function(a){!function(a){a.fn.selectpicker.defaults={noneSelectedText:"چیزی انتخاب نشده است",noneResultsText:"هیج مشابهی برای {0} پیدا نشد",countSelectedText:"{0} از {1} مورد انتخاب شده",maxOptionsText:["بیشتر ممکن نیست {حداکثر {n} عدد}","بیشتر ممکن نیست {حداکثر {n} عدد}"],selectAllText:"انتخاب همه",deselectAllText:"انتخاب هیچ کدام",multipleSeparator:", "}}(a)}); | * Bootstrap-select v1.11.0 (http://silviomoreto.github.io/bootstrap-select) | random_line_split |
iterateOverGenerator.es6.js | /**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const compose = require('../../vendor/koa-compose.es6');
/**
*
* @param Array<GeneratorFunction> middleware
*/
export default function iterateOverGenerator(middleware) {
let iterator = compose(middleware)();
function iterate(value) |
setTimeout(iterate, 0);
}
| {
let iteration;
if (value) {
iteration = value.next();
} else {
iteration = iterator.next();
}
if (!iteration.done) {
if ('then' in iteration.value) {
iteration.value.then(function iterateGenerator() {
iterator.next();
});
} else {
iterate(iteration.value);
}
}
} | identifier_body |
iterateOverGenerator.es6.js | /**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const compose = require('../../vendor/koa-compose.es6');
/**
*
* @param Array<GeneratorFunction> middleware
*/
export default function iterateOverGenerator(middleware) {
let iterator = compose(middleware)();
function iterate(value) {
let iteration;
if (value) {
iteration = value.next();
} else |
if (!iteration.done) {
if ('then' in iteration.value) {
iteration.value.then(function iterateGenerator() {
iterator.next();
});
} else {
iterate(iteration.value);
}
}
}
setTimeout(iterate, 0);
}
| {
iteration = iterator.next();
} | conditional_block |
iterateOverGenerator.es6.js | /**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const compose = require('../../vendor/koa-compose.es6');
/**
*
* @param Array<GeneratorFunction> middleware
*/
export default function iterateOverGenerator(middleware) {
let iterator = compose(middleware)();
function | (value) {
let iteration;
if (value) {
iteration = value.next();
} else {
iteration = iterator.next();
}
if (!iteration.done) {
if ('then' in iteration.value) {
iteration.value.then(function iterateGenerator() {
iterator.next();
});
} else {
iterate(iteration.value);
}
}
}
setTimeout(iterate, 0);
}
| iterate | identifier_name |
iterateOverGenerator.es6.js | /**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const compose = require('../../vendor/koa-compose.es6');
/**
*
* @param Array<GeneratorFunction> middleware
*/
export default function iterateOverGenerator(middleware) {
let iterator = compose(middleware)();
| } else {
iteration = iterator.next();
}
if (!iteration.done) {
if ('then' in iteration.value) {
iteration.value.then(function iterateGenerator() {
iterator.next();
});
} else {
iterate(iteration.value);
}
}
}
setTimeout(iterate, 0);
} | function iterate(value) {
let iteration;
if (value) {
iteration = value.next(); | random_line_split |
xhr_impl_spec.ts | import {
AsyncTestCompleter,
beforeEach,
ddescribe,
describe,
expect,
iit,
inject,
it,
xit
} from 'angular2/test_lib';
import {XHRImpl} from 'angular2/src/core/render/xhr_impl';
import {PromiseWrapper} from 'angular2/src/core/facade/async';
export function | () {
describe('XHRImpl', () => {
var xhr: XHRImpl;
var url200 = '/base/modules/angular2/test/core/services/static_assets/200.html';
var url404 = '/base/modules/angular2/test/core/services/static_assets/404.html';
beforeEach(() => { xhr = new XHRImpl(); });
it('should resolve the Promise with the file content on success',
inject([AsyncTestCompleter], (async) => {
xhr.get(url200).then((text) => {
expect(text.trim()).toEqual('<p>hey</p>');
async.done();
});
}), 10000);
it('should reject the Promise on failure', inject([AsyncTestCompleter], (async) => {
PromiseWrapper.catchError(xhr.get(url404), (e) => {
expect(e).toEqual(`Failed to load ${url404}`);
async.done();
return null;
});
}), 10000);
});
}
| main | identifier_name |
xhr_impl_spec.ts | import {
AsyncTestCompleter,
beforeEach,
ddescribe,
describe,
expect,
iit,
inject,
it,
xit
} from 'angular2/test_lib';
import {XHRImpl} from 'angular2/src/core/render/xhr_impl';
import {PromiseWrapper} from 'angular2/src/core/facade/async';
export function main() {
describe('XHRImpl', () => {
var xhr: XHRImpl;
var url200 = '/base/modules/angular2/test/core/services/static_assets/200.html';
var url404 = '/base/modules/angular2/test/core/services/static_assets/404.html';
beforeEach(() => { xhr = new XHRImpl(); });
it('should resolve the Promise with the file content on success',
inject([AsyncTestCompleter], (async) => {
xhr.get(url200).then((text) => {
expect(text.trim()).toEqual('<p>hey</p>');
async.done();
});
}), 10000);
it('should reject the Promise on failure', inject([AsyncTestCompleter], (async) => {
PromiseWrapper.catchError(xhr.get(url404), (e) => {
expect(e).toEqual(`Failed to load ${url404}`);
async.done(); | return null;
});
}), 10000);
});
} | random_line_split |
|
xhr_impl_spec.ts | import {
AsyncTestCompleter,
beforeEach,
ddescribe,
describe,
expect,
iit,
inject,
it,
xit
} from 'angular2/test_lib';
import {XHRImpl} from 'angular2/src/core/render/xhr_impl';
import {PromiseWrapper} from 'angular2/src/core/facade/async';
export function main() | {
describe('XHRImpl', () => {
var xhr: XHRImpl;
var url200 = '/base/modules/angular2/test/core/services/static_assets/200.html';
var url404 = '/base/modules/angular2/test/core/services/static_assets/404.html';
beforeEach(() => { xhr = new XHRImpl(); });
it('should resolve the Promise with the file content on success',
inject([AsyncTestCompleter], (async) => {
xhr.get(url200).then((text) => {
expect(text.trim()).toEqual('<p>hey</p>');
async.done();
});
}), 10000);
it('should reject the Promise on failure', inject([AsyncTestCompleter], (async) => {
PromiseWrapper.catchError(xhr.get(url404), (e) => {
expect(e).toEqual(`Failed to load ${url404}`);
async.done();
return null;
});
}), 10000);
});
} | identifier_body |
|
lib.rs | use std::io::ErrorKind;
use std::net::Ipv4Addr;
use std::net::TcpStream;
use spaceapi_server::api;
use spaceapi_server::{SpaceapiServer, SpaceapiServerBuilder};
/// Create a new status object containing test data.
fn | () -> api::Status {
api::StatusBuilder::new("ourspace")
.logo("https://example.com/logo.png")
.url("https://example.com/")
.location(api::Location {
address: Some("Street 1, Zürich, Switzerland".into()),
lat: 47.123,
lon: 8.88,
})
.contact(api::Contact {
irc: None,
twitter: None,
foursquare: None,
email: Some("[email protected]".into()),
ml: None,
phone: None,
jabber: None,
issue_mail: None,
identica: None,
facebook: None,
google: None,
keymasters: None,
sip: None,
})
.add_issue_report_channel(api::IssueReportChannel::Email)
.add_issue_report_channel(api::IssueReportChannel::Twitter)
.build()
.unwrap()
}
/// Create a new SpaceapiServer instance listening on the specified port.
fn get_server(status: api::Status) -> SpaceapiServer {
SpaceapiServerBuilder::new(status)
.redis_connection_info("redis://127.0.0.1/")
.build()
.unwrap()
}
#[test]
fn server_starts() {
//! Test that the spaceapi server starts at all.
// Ip / port for test server
let ip = Ipv4Addr::new(127, 0, 0, 1);
let port = 3344;
// Test data
let status = get_status();
// Connection to port should fail right now
let connect_result = TcpStream::connect((ip, port));
assert!(connect_result.is_err());
assert_eq!(connect_result.unwrap_err().kind(), ErrorKind::ConnectionRefused);
// Instantiate and start server
let server = get_server(status);
let mut listening = server.serve((ip, port)).unwrap();
// Connecting to server should work now
let connect_result = TcpStream::connect((ip, port));
assert!(connect_result.is_ok());
// Close server
listening.close().unwrap();
}
| get_status | identifier_name |
lib.rs | use std::io::ErrorKind;
use std::net::Ipv4Addr;
use std::net::TcpStream;
use spaceapi_server::api;
use spaceapi_server::{SpaceapiServer, SpaceapiServerBuilder};
/// Create a new status object containing test data.
fn get_status() -> api::Status {
api::StatusBuilder::new("ourspace")
.logo("https://example.com/logo.png")
.url("https://example.com/")
.location(api::Location {
address: Some("Street 1, Zürich, Switzerland".into()),
lat: 47.123,
lon: 8.88,
})
.contact(api::Contact {
irc: None,
twitter: None,
foursquare: None,
email: Some("[email protected]".into()),
ml: None,
phone: None,
jabber: None,
issue_mail: None,
identica: None,
facebook: None,
google: None,
keymasters: None,
sip: None,
})
.add_issue_report_channel(api::IssueReportChannel::Email)
.add_issue_report_channel(api::IssueReportChannel::Twitter)
.build()
.unwrap()
}
/// Create a new SpaceapiServer instance listening on the specified port.
fn get_server(status: api::Status) -> SpaceapiServer { |
#[test]
fn server_starts() {
//! Test that the spaceapi server starts at all.
// Ip / port for test server
let ip = Ipv4Addr::new(127, 0, 0, 1);
let port = 3344;
// Test data
let status = get_status();
// Connection to port should fail right now
let connect_result = TcpStream::connect((ip, port));
assert!(connect_result.is_err());
assert_eq!(connect_result.unwrap_err().kind(), ErrorKind::ConnectionRefused);
// Instantiate and start server
let server = get_server(status);
let mut listening = server.serve((ip, port)).unwrap();
// Connecting to server should work now
let connect_result = TcpStream::connect((ip, port));
assert!(connect_result.is_ok());
// Close server
listening.close().unwrap();
}
|
SpaceapiServerBuilder::new(status)
.redis_connection_info("redis://127.0.0.1/")
.build()
.unwrap()
}
| identifier_body |
lib.rs | use std::io::ErrorKind;
use std::net::Ipv4Addr;
use std::net::TcpStream;
use spaceapi_server::api;
use spaceapi_server::{SpaceapiServer, SpaceapiServerBuilder};
/// Create a new status object containing test data.
fn get_status() -> api::Status {
api::StatusBuilder::new("ourspace")
.logo("https://example.com/logo.png")
.url("https://example.com/")
.location(api::Location {
address: Some("Street 1, Zürich, Switzerland".into()),
lat: 47.123,
lon: 8.88,
})
.contact(api::Contact {
irc: None,
twitter: None,
foursquare: None,
email: Some("[email protected]".into()),
ml: None,
phone: None,
jabber: None,
issue_mail: None,
identica: None,
facebook: None,
google: None,
keymasters: None,
sip: None,
})
.add_issue_report_channel(api::IssueReportChannel::Email)
.add_issue_report_channel(api::IssueReportChannel::Twitter)
.build()
.unwrap()
}
/// Create a new SpaceapiServer instance listening on the specified port.
fn get_server(status: api::Status) -> SpaceapiServer {
SpaceapiServerBuilder::new(status)
.redis_connection_info("redis://127.0.0.1/")
.build()
.unwrap()
}
#[test]
fn server_starts() {
//! Test that the spaceapi server starts at all.
| // Ip / port for test server
let ip = Ipv4Addr::new(127, 0, 0, 1);
let port = 3344;
// Test data
let status = get_status();
// Connection to port should fail right now
let connect_result = TcpStream::connect((ip, port));
assert!(connect_result.is_err());
assert_eq!(connect_result.unwrap_err().kind(), ErrorKind::ConnectionRefused);
// Instantiate and start server
let server = get_server(status);
let mut listening = server.serve((ip, port)).unwrap();
// Connecting to server should work now
let connect_result = TcpStream::connect((ip, port));
assert!(connect_result.is_ok());
// Close server
listening.close().unwrap();
} | random_line_split |
|
elf64.rs | use core::fmt;
use core::iter::Iterator;
use ::kern::console::LogLevel::*;
pub const SIZEOF_IDENT: usize = 16;
pub const SIZEOF_EHDR: usize = 64;
pub const ELFCLASS: u8 = ELFCLASS64;
#[repr(C)]
#[derive(Clone, Copy, Default, PartialEq)]
pub struct Header {
/// Magic number and other info
pub e_ident: [u8; SIZEOF_IDENT],
/// Object file type
pub e_type: u16,
/// Architecture
pub e_machine: u16,
/// Object file version
pub e_version: u32,
/// Entry point virtual address
pub e_entry: u64,
/// Program header table file offset
pub e_phoff: u64,
/// Section header table file offset
pub e_shoff: u64,
/// Processor-specific flags
pub e_flags: u32,
/// ELF header size in bytes
pub e_ehsize: u16,
/// Program header table entry size
pub e_phentsize: u16,
/// Program header table entry count
pub e_phnum: u16,
/// Section header table entry size
pub e_shentsize: u16,
/// Section header table entry count
pub e_shnum: u16,
/// Section header string table index
pub e_shstrndx: u16,
}
impl fmt::Debug for Header {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "e_ident: {:?} e_type: {} e_machine: 0x{:x} e_version: 0x{:x} e_entry: 0x{:x} \
e_phoff: 0x{:x} e_shoff: 0x{:x} e_flags: {:x} e_ehsize: {} e_phentsize: {} \
e_phnum: {} e_shentsize: {} e_shnum: {} e_shstrndx: {}",
self.e_ident,
et_to_str(self.e_type),
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx)
}
}
/// No file type.
pub const ET_NONE: u16 = 0;
/// Relocatable file.
pub const ET_REL: u16 = 1;
/// Executable file.
pub const ET_EXEC: u16 = 2;
/// Shared object file.
pub const ET_DYN: u16 = 3;
/// Core file.
pub const ET_CORE: u16 = 4;
/// Number of defined types.
pub const ET_NUM: u16 = 5;
/// The ELF magic number.
pub const ELFMAG: &'static [u8; 4] = b"\x7FELF";
/// Sizeof ELF magic number.
pub const SELFMAG: usize = 4;
/// File class byte index.
pub const EI_CLASS: usize = 4;
/// Invalid class.
pub const ELFCLASSNONE: u8 = 0;
/// 32-bit objects.
pub const ELFCLASS32: u8 = 1;
/// 64-bit objects.
pub const ELFCLASS64: u8 = 2;
/// ELF class number.
pub const ELFCLASSNUM: u8 = 3;
/// Convert an ET value to their associated string.
#[inline]
pub fn et_to_str(et: u16) -> &'static str {
match et {
ET_NONE => "NONE",
ET_REL => "REL",
ET_EXEC => "EXEC",
ET_DYN => "DYN",
ET_CORE => "CORE",
ET_NUM => "NUM",
_ => "UNKNOWN_ET",
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Default)]
pub struct ProgramHeader {
/// Segment type
pub p_type: u32,
/// Segment flags
pub p_flags: u32,
/// Segment file offset
pub p_offset: u64,
/// Segment virtual address
pub p_vaddr: u64,
/// Segment physical address
pub p_paddr: u64,
/// Segment size in file
pub p_filesz: u64,
/// Segment size in memory
pub p_memsz: u64,
/// Segment alignment
pub p_align: u64,
}
pub const SIZEOF_PHDR: usize = 56;
/// Program header table entry unused
pub const PT_NULL: u32 = 0;
/// Loadable program segment
pub const PT_LOAD: u32 = 1;
/// Dynamic linking information
pub const PT_DYNAMIC: u32 = 2;
/// Program interpreter
pub const PT_INTERP: u32 = 3;
/// Auxiliary information
pub const PT_NOTE: u32 = 4;
/// Reserved
pub const PT_SHLIB: u32 = 5;
/// Entry for header table itself
pub const PT_PHDR: u32 = 6;
/// Thread-local storage segment
pub const PT_TLS: u32 = 7;
/// Number of defined types
pub const PT_NUM: u32 = 8;
/// Start of OS-specific
pub const PT_LOOS: u32 = 0x60000000;
/// GCC .eh_frame_hdr segment
pub const PT_GNU_EH_FRAME: u32 = 0x6474e550;
/// Indicates stack executability
pub const PT_GNU_STACK: u32 = 0x6474e551;
/// Read-only after relocation
pub const PT_GNU_RELRO: u32 = 0x6474e552;
/// Sun Specific segment
pub const PT_LOSUNW: u32 = 0x6ffffffa;
/// Sun Specific segment
pub const PT_SUNWBSS: u32 = 0x6ffffffa;
/// Stack segment
pub const PT_SUNWSTACK: u32 = 0x6ffffffb;
/// End of OS-specific
pub const PT_HISUNW: u32 = 0x6fffffff;
/// End of OS-specific
pub const PT_HIOS: u32 = 0x6fffffff;
/// Start of processor-specific
pub const PT_LOPROC: u32 = 0x70000000;
/// ARM unwind segment
pub const PT_ARM_EXIDX: u32 = 0x70000001;
/// End of processor-specific
pub const PT_HIPROC: u32 = 0x7fffffff;
/// Segment is executable
pub const PF_X: u32 = 1 << 0;
/// Segment is writable
pub const PF_W: u32 = 1 << 1;
/// Segment is readable
pub const PF_R: u32 = 1 << 2;
pub struct ProgramHeaderIter<'a> {
data: &'a [u8],
header: &'a Header,
next: usize
}
pub struct Elf64<'a> {
pub header: &'a Header,
pub data: &'a [u8]
}
impl<'a> Elf64<'a> {
pub unsafe fn | (bytes: &'a [u8]) -> Elf64<'a> {
let h = &*(bytes.as_ptr() as *const Header);
Elf64 {
data: bytes,
header: h,
}
}
pub fn program_headers(&self) -> ProgramHeaderIter<'a> {
ProgramHeaderIter {
data: self.data,
header: self.header,
next: 0
}
}
}
impl<'a> Iterator for ProgramHeaderIter<'a> {
type Item = &'a ProgramHeader;
fn next(&mut self) -> Option<Self::Item> {
if self.next < self.header.e_phnum as usize {
let program = unsafe {
&*(self.data.as_ptr().offset(
self.header.e_phoff as isize +
self.header.e_phentsize as isize * self.next as isize)
as *const ProgramHeader)
};
self.next += 1;
Some(program)
} else {
None
}
}
}
| from | identifier_name |
elf64.rs | use core::fmt;
use core::iter::Iterator;
use ::kern::console::LogLevel::*;
pub const SIZEOF_IDENT: usize = 16;
pub const SIZEOF_EHDR: usize = 64;
pub const ELFCLASS: u8 = ELFCLASS64;
#[repr(C)]
#[derive(Clone, Copy, Default, PartialEq)]
pub struct Header {
/// Magic number and other info
pub e_ident: [u8; SIZEOF_IDENT],
/// Object file type
pub e_type: u16,
/// Architecture
pub e_machine: u16,
/// Object file version
pub e_version: u32,
/// Entry point virtual address
pub e_entry: u64,
/// Program header table file offset
pub e_phoff: u64,
/// Section header table file offset
pub e_shoff: u64,
/// Processor-specific flags
pub e_flags: u32,
/// ELF header size in bytes
pub e_ehsize: u16,
/// Program header table entry size
pub e_phentsize: u16,
/// Program header table entry count
pub e_phnum: u16,
/// Section header table entry size
pub e_shentsize: u16,
/// Section header table entry count
pub e_shnum: u16,
/// Section header string table index
pub e_shstrndx: u16,
}
impl fmt::Debug for Header {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "e_ident: {:?} e_type: {} e_machine: 0x{:x} e_version: 0x{:x} e_entry: 0x{:x} \
e_phoff: 0x{:x} e_shoff: 0x{:x} e_flags: {:x} e_ehsize: {} e_phentsize: {} \
e_phnum: {} e_shentsize: {} e_shnum: {} e_shstrndx: {}",
self.e_ident,
et_to_str(self.e_type),
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx)
}
}
/// No file type.
pub const ET_NONE: u16 = 0;
/// Relocatable file.
pub const ET_REL: u16 = 1;
/// Executable file.
pub const ET_EXEC: u16 = 2;
/// Shared object file.
pub const ET_DYN: u16 = 3;
/// Core file.
pub const ET_CORE: u16 = 4;
/// Number of defined types.
pub const ET_NUM: u16 = 5;
/// The ELF magic number.
pub const ELFMAG: &'static [u8; 4] = b"\x7FELF";
/// Sizeof ELF magic number.
pub const SELFMAG: usize = 4;
/// File class byte index.
pub const EI_CLASS: usize = 4;
/// Invalid class.
pub const ELFCLASSNONE: u8 = 0;
/// 32-bit objects.
pub const ELFCLASS32: u8 = 1;
/// 64-bit objects.
pub const ELFCLASS64: u8 = 2;
/// ELF class number.
pub const ELFCLASSNUM: u8 = 3;
/// Convert an ET value to their associated string.
#[inline]
pub fn et_to_str(et: u16) -> &'static str {
match et {
ET_NONE => "NONE",
ET_REL => "REL",
ET_EXEC => "EXEC",
ET_DYN => "DYN",
ET_CORE => "CORE",
ET_NUM => "NUM",
_ => "UNKNOWN_ET",
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Default)]
pub struct ProgramHeader {
/// Segment type
pub p_type: u32,
/// Segment flags
pub p_flags: u32,
/// Segment file offset
pub p_offset: u64,
/// Segment virtual address
pub p_vaddr: u64,
/// Segment physical address
pub p_paddr: u64,
/// Segment size in file
pub p_filesz: u64,
/// Segment size in memory
pub p_memsz: u64,
/// Segment alignment
pub p_align: u64,
}
pub const SIZEOF_PHDR: usize = 56;
/// Program header table entry unused
pub const PT_NULL: u32 = 0;
/// Loadable program segment
pub const PT_LOAD: u32 = 1;
/// Dynamic linking information
pub const PT_DYNAMIC: u32 = 2;
/// Program interpreter
pub const PT_INTERP: u32 = 3;
/// Auxiliary information
pub const PT_NOTE: u32 = 4;
/// Reserved
pub const PT_SHLIB: u32 = 5;
/// Entry for header table itself
pub const PT_PHDR: u32 = 6;
/// Thread-local storage segment
pub const PT_TLS: u32 = 7;
/// Number of defined types
pub const PT_NUM: u32 = 8;
/// Start of OS-specific
pub const PT_LOOS: u32 = 0x60000000;
/// GCC .eh_frame_hdr segment
pub const PT_GNU_EH_FRAME: u32 = 0x6474e550;
/// Indicates stack executability
pub const PT_GNU_STACK: u32 = 0x6474e551;
/// Read-only after relocation
pub const PT_GNU_RELRO: u32 = 0x6474e552;
/// Sun Specific segment
pub const PT_LOSUNW: u32 = 0x6ffffffa;
/// Sun Specific segment
pub const PT_SUNWBSS: u32 = 0x6ffffffa;
/// Stack segment
pub const PT_SUNWSTACK: u32 = 0x6ffffffb;
/// End of OS-specific
pub const PT_HISUNW: u32 = 0x6fffffff;
/// End of OS-specific
pub const PT_HIOS: u32 = 0x6fffffff;
/// Start of processor-specific
pub const PT_LOPROC: u32 = 0x70000000;
/// ARM unwind segment
pub const PT_ARM_EXIDX: u32 = 0x70000001;
/// End of processor-specific
pub const PT_HIPROC: u32 = 0x7fffffff;
| pub const PF_X: u32 = 1 << 0;
/// Segment is writable
pub const PF_W: u32 = 1 << 1;
/// Segment is readable
pub const PF_R: u32 = 1 << 2;
pub struct ProgramHeaderIter<'a> {
data: &'a [u8],
header: &'a Header,
next: usize
}
pub struct Elf64<'a> {
pub header: &'a Header,
pub data: &'a [u8]
}
impl<'a> Elf64<'a> {
pub unsafe fn from(bytes: &'a [u8]) -> Elf64<'a> {
let h = &*(bytes.as_ptr() as *const Header);
Elf64 {
data: bytes,
header: h,
}
}
pub fn program_headers(&self) -> ProgramHeaderIter<'a> {
ProgramHeaderIter {
data: self.data,
header: self.header,
next: 0
}
}
}
impl<'a> Iterator for ProgramHeaderIter<'a> {
type Item = &'a ProgramHeader;
fn next(&mut self) -> Option<Self::Item> {
if self.next < self.header.e_phnum as usize {
let program = unsafe {
&*(self.data.as_ptr().offset(
self.header.e_phoff as isize +
self.header.e_phentsize as isize * self.next as isize)
as *const ProgramHeader)
};
self.next += 1;
Some(program)
} else {
None
}
}
} | /// Segment is executable | random_line_split |
elf64.rs | use core::fmt;
use core::iter::Iterator;
use ::kern::console::LogLevel::*;
pub const SIZEOF_IDENT: usize = 16;
pub const SIZEOF_EHDR: usize = 64;
pub const ELFCLASS: u8 = ELFCLASS64;
#[repr(C)]
#[derive(Clone, Copy, Default, PartialEq)]
pub struct Header {
/// Magic number and other info
pub e_ident: [u8; SIZEOF_IDENT],
/// Object file type
pub e_type: u16,
/// Architecture
pub e_machine: u16,
/// Object file version
pub e_version: u32,
/// Entry point virtual address
pub e_entry: u64,
/// Program header table file offset
pub e_phoff: u64,
/// Section header table file offset
pub e_shoff: u64,
/// Processor-specific flags
pub e_flags: u32,
/// ELF header size in bytes
pub e_ehsize: u16,
/// Program header table entry size
pub e_phentsize: u16,
/// Program header table entry count
pub e_phnum: u16,
/// Section header table entry size
pub e_shentsize: u16,
/// Section header table entry count
pub e_shnum: u16,
/// Section header string table index
pub e_shstrndx: u16,
}
impl fmt::Debug for Header {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
/// No file type.
pub const ET_NONE: u16 = 0;
/// Relocatable file.
pub const ET_REL: u16 = 1;
/// Executable file.
pub const ET_EXEC: u16 = 2;
/// Shared object file.
pub const ET_DYN: u16 = 3;
/// Core file.
pub const ET_CORE: u16 = 4;
/// Number of defined types.
pub const ET_NUM: u16 = 5;
/// The ELF magic number.
pub const ELFMAG: &'static [u8; 4] = b"\x7FELF";
/// Sizeof ELF magic number.
pub const SELFMAG: usize = 4;
/// File class byte index.
pub const EI_CLASS: usize = 4;
/// Invalid class.
pub const ELFCLASSNONE: u8 = 0;
/// 32-bit objects.
pub const ELFCLASS32: u8 = 1;
/// 64-bit objects.
pub const ELFCLASS64: u8 = 2;
/// ELF class number.
pub const ELFCLASSNUM: u8 = 3;
/// Convert an ET value to their associated string.
#[inline]
pub fn et_to_str(et: u16) -> &'static str {
match et {
ET_NONE => "NONE",
ET_REL => "REL",
ET_EXEC => "EXEC",
ET_DYN => "DYN",
ET_CORE => "CORE",
ET_NUM => "NUM",
_ => "UNKNOWN_ET",
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Default)]
pub struct ProgramHeader {
/// Segment type
pub p_type: u32,
/// Segment flags
pub p_flags: u32,
/// Segment file offset
pub p_offset: u64,
/// Segment virtual address
pub p_vaddr: u64,
/// Segment physical address
pub p_paddr: u64,
/// Segment size in file
pub p_filesz: u64,
/// Segment size in memory
pub p_memsz: u64,
/// Segment alignment
pub p_align: u64,
}
pub const SIZEOF_PHDR: usize = 56;
/// Program header table entry unused
pub const PT_NULL: u32 = 0;
/// Loadable program segment
pub const PT_LOAD: u32 = 1;
/// Dynamic linking information
pub const PT_DYNAMIC: u32 = 2;
/// Program interpreter
pub const PT_INTERP: u32 = 3;
/// Auxiliary information
pub const PT_NOTE: u32 = 4;
/// Reserved
pub const PT_SHLIB: u32 = 5;
/// Entry for header table itself
pub const PT_PHDR: u32 = 6;
/// Thread-local storage segment
pub const PT_TLS: u32 = 7;
/// Number of defined types
pub const PT_NUM: u32 = 8;
/// Start of OS-specific
pub const PT_LOOS: u32 = 0x60000000;
/// GCC .eh_frame_hdr segment
pub const PT_GNU_EH_FRAME: u32 = 0x6474e550;
/// Indicates stack executability
pub const PT_GNU_STACK: u32 = 0x6474e551;
/// Read-only after relocation
pub const PT_GNU_RELRO: u32 = 0x6474e552;
/// Sun Specific segment
pub const PT_LOSUNW: u32 = 0x6ffffffa;
/// Sun Specific segment
pub const PT_SUNWBSS: u32 = 0x6ffffffa;
/// Stack segment
pub const PT_SUNWSTACK: u32 = 0x6ffffffb;
/// End of OS-specific
pub const PT_HISUNW: u32 = 0x6fffffff;
/// End of OS-specific
pub const PT_HIOS: u32 = 0x6fffffff;
/// Start of processor-specific
pub const PT_LOPROC: u32 = 0x70000000;
/// ARM unwind segment
pub const PT_ARM_EXIDX: u32 = 0x70000001;
/// End of processor-specific
pub const PT_HIPROC: u32 = 0x7fffffff;
/// Segment is executable
pub const PF_X: u32 = 1 << 0;
/// Segment is writable
pub const PF_W: u32 = 1 << 1;
/// Segment is readable
pub const PF_R: u32 = 1 << 2;
pub struct ProgramHeaderIter<'a> {
data: &'a [u8],
header: &'a Header,
next: usize
}
pub struct Elf64<'a> {
pub header: &'a Header,
pub data: &'a [u8]
}
impl<'a> Elf64<'a> {
pub unsafe fn from(bytes: &'a [u8]) -> Elf64<'a> {
let h = &*(bytes.as_ptr() as *const Header);
Elf64 {
data: bytes,
header: h,
}
}
pub fn program_headers(&self) -> ProgramHeaderIter<'a> {
ProgramHeaderIter {
data: self.data,
header: self.header,
next: 0
}
}
}
impl<'a> Iterator for ProgramHeaderIter<'a> {
type Item = &'a ProgramHeader;
fn next(&mut self) -> Option<Self::Item> {
if self.next < self.header.e_phnum as usize {
let program = unsafe {
&*(self.data.as_ptr().offset(
self.header.e_phoff as isize +
self.header.e_phentsize as isize * self.next as isize)
as *const ProgramHeader)
};
self.next += 1;
Some(program)
} else {
None
}
}
}
| {
write!(f, "e_ident: {:?} e_type: {} e_machine: 0x{:x} e_version: 0x{:x} e_entry: 0x{:x} \
e_phoff: 0x{:x} e_shoff: 0x{:x} e_flags: {:x} e_ehsize: {} e_phentsize: {} \
e_phnum: {} e_shentsize: {} e_shnum: {} e_shstrndx: {}",
self.e_ident,
et_to_str(self.e_type),
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx)
} | identifier_body |
stream.rs | use metadata;
use frame;
use subframe;
use metadata::{Metadata, StreamInfo};
use frame::frame_parser;
use utility::{
ErrorKind, ByteStream, ReadStream, Sample, SampleSize, StreamProducer,
many_metadata,
};
use std::io;
use std::usize;
use std::fs::File;
/// FLAC stream that decodes and hold file information.
pub struct Stream<P: StreamProducer> {
info: StreamInfo,
metadata: Vec<Metadata>,
producer: P,
}
/// Alias for a FLAC stream produced from `Read`.
pub type StreamReader<R> = Stream<ReadStream<R>>;
/// Alias for a FLAC stream produced from a byte stream buffer.
pub type StreamBuffer<'a> = Stream<ByteStream<'a>>;
impl<P> Stream<P> where P: StreamProducer {
/// Constructor for the default state of a FLAC stream.
#[inline]
pub fn new<R: io::Read>(reader: R) -> Result<StreamReader<R>, ErrorKind> {
let producer = ReadStream::new(reader);
Stream::from_stream_producer(producer)
}
/// Returns information for the current stream.
#[inline]
pub fn info(&self) -> StreamInfo {
self.info
}
/// Returns a slice of `Metadata`
///
/// This slice excludes `StreamInfo`, which is located in `Stream::info`.
/// Everything else is related to metadata for the FLAC stream is in the
/// slice.
#[inline]
pub fn metadata(&self) -> &[Metadata] {
&self.metadata
}
/// Constructs a decoder with the given file name.
///
/// # Failures
///
/// * `ErrorKind::IO(io::ErrorKind::NotFound)` is returned when the given
/// filename isn't found.
/// * `ErrorKind::IO(io::ErrorKind::InvalidData)` is returned when the data
/// within the file isn't valid FLAC data.
/// * Several different parser specific errors that are structured as
/// `ErrorKind::<parser_name>Parser`.
/// * Several different invalidation specific errors that are
/// structured as `ErrorKind::Invalid<invalidation_name>`.
#[inline]
pub fn | (filename: &str) -> Result<StreamReader<File>, ErrorKind> {
File::open(filename).map_err(|e| ErrorKind::IO(e.kind()))
.and_then(|file| {
let producer = ReadStream::new(file);
Stream::from_stream_producer(producer)
})
}
/// Constructs a decoder with the given buffer.
///
/// This constructor assumes that an entire FLAC file is in the buffer.
///
/// # Failures
///
/// * `ErrorKind::IO(io::ErrorKind::InvalidData)` is returned when the data
/// within the file isn't valid FLAC data.
/// * Several different parser specific errors that are structured as
/// `ErrorKind::<parser_name>Parser`.
/// * Several different invalidation specific errors that are
/// structured as `ErrorKind::Invalid<invalidation_name>`.
#[inline]
pub fn from_buffer(buffer: &[u8]) -> Result<StreamBuffer, ErrorKind> {
let producer = ByteStream::new(buffer);
Stream::from_stream_producer(producer)
}
fn from_stream_producer(mut producer: P) -> Result<Self, ErrorKind> {
let mut stream_info = Default::default();
let mut metadata = Vec::new();
many_metadata(&mut producer, |block| {
if let metadata::Data::StreamInfo(info) = block.data {
stream_info = info;
} else {
metadata.push(block);
}
}).map(|_| {
Stream {
info: stream_info,
metadata: metadata,
producer: producer,
}
})
}
/// Returns an iterator over the decoded samples.
#[inline]
pub fn iter<S: SampleSize>(&mut self) -> Iter<P, S::Extended> {
let samples_left = self.info.total_samples;
let channels = self.info.channels as usize;
let block_size = self.info.max_block_size as usize;
let buffer_size = block_size * channels;
Iter {
stream: self,
channel: 0,
block_size: 0,
sample_index: 0,
samples_left: samples_left,
buffer: vec![S::Extended::from_i8(0); buffer_size]
}
}
fn next_frame<S>(&mut self, buffer: &mut [S]) -> Option<usize>
where S: Sample {
let stream_info = &self.info;
loop {
match self.producer.parse(|i| frame_parser(i, stream_info, buffer)) {
Ok(frame) => {
let channels = frame.header.channels as usize;
let block_size = frame.header.block_size as usize;
let subframes = frame.subframes[0..channels].iter();
for (channel, subframe) in subframes.enumerate() {
let start = channel * block_size;
let end = (channel + 1) * block_size;
let output = &mut buffer[start..end];
subframe::decode(&subframe, block_size, output);
}
frame::decode(frame.header.channel_assignment, buffer);
return Some(block_size);
}
Err(ErrorKind::Continue) => continue,
Err(_) => return None,
}
}
}
}
/// An iterator over a reference of the decoded FLAC stream.
pub struct Iter<'a, P, S>
where P: 'a + StreamProducer,
S: Sample{
stream: &'a mut Stream<P>,
channel: usize,
block_size: usize,
sample_index: usize,
samples_left: u64,
buffer: Vec<S>,
}
impl<'a, P, S> Iterator for Iter<'a, P, S>
where P: StreamProducer,
S: Sample {
type Item = S::Normal;
fn next(&mut self) -> Option<Self::Item> {
if self.sample_index == self.block_size {
let buffer = &mut self.buffer;
if let Some(block_size) = self.stream.next_frame(buffer) {
self.sample_index = 0;
self.block_size = block_size;
} else {
return None;
}
}
let channels = self.stream.info.channels as usize;
let index = self.sample_index + (self.channel * self.block_size);
let sample = unsafe { *self.buffer.get_unchecked(index) };
self.channel += 1;
// Reset current channel
if self.channel == channels {
self.channel = 0;
self.sample_index += 1;
self.samples_left -= 1;
}
S::to_normal(sample)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let samples_left = self.samples_left as usize;
let max_value = usize::max_value() as u64;
// There is a chance that samples_left will be larger than a usize since
// it is a u64. Make the upper bound None when it is.
if self.samples_left > max_value {
(samples_left, None)
} else {
(samples_left, Some(samples_left))
}
}
}
//impl<'a, P, S> IntoIterator for &'a mut Stream<P>
// where P: StreamProducer,
// S: Sample {
// type Item = S::Normal;
// type IntoIter = Iter<'a, P, S>;
//
// #[inline]
// fn into_iter(self) -> Self::IntoIter {
// self.iter()
// }
//}
| from_file | identifier_name |
stream.rs | use metadata;
use frame;
use subframe;
use metadata::{Metadata, StreamInfo};
use frame::frame_parser;
use utility::{
ErrorKind, ByteStream, ReadStream, Sample, SampleSize, StreamProducer,
many_metadata,
};
use std::io;
use std::usize;
use std::fs::File;
/// FLAC stream that decodes and hold file information.
pub struct Stream<P: StreamProducer> {
info: StreamInfo,
metadata: Vec<Metadata>,
producer: P,
}
/// Alias for a FLAC stream produced from `Read`.
pub type StreamReader<R> = Stream<ReadStream<R>>;
/// Alias for a FLAC stream produced from a byte stream buffer.
pub type StreamBuffer<'a> = Stream<ByteStream<'a>>;
impl<P> Stream<P> where P: StreamProducer {
/// Constructor for the default state of a FLAC stream.
#[inline]
pub fn new<R: io::Read>(reader: R) -> Result<StreamReader<R>, ErrorKind> |
/// Returns information for the current stream.
#[inline]
pub fn info(&self) -> StreamInfo {
self.info
}
/// Returns a slice of `Metadata`
///
/// This slice excludes `StreamInfo`, which is located in `Stream::info`.
/// Everything else is related to metadata for the FLAC stream is in the
/// slice.
#[inline]
pub fn metadata(&self) -> &[Metadata] {
&self.metadata
}
/// Constructs a decoder with the given file name.
///
/// # Failures
///
/// * `ErrorKind::IO(io::ErrorKind::NotFound)` is returned when the given
/// filename isn't found.
/// * `ErrorKind::IO(io::ErrorKind::InvalidData)` is returned when the data
/// within the file isn't valid FLAC data.
/// * Several different parser specific errors that are structured as
/// `ErrorKind::<parser_name>Parser`.
/// * Several different invalidation specific errors that are
/// structured as `ErrorKind::Invalid<invalidation_name>`.
#[inline]
pub fn from_file(filename: &str) -> Result<StreamReader<File>, ErrorKind> {
File::open(filename).map_err(|e| ErrorKind::IO(e.kind()))
.and_then(|file| {
let producer = ReadStream::new(file);
Stream::from_stream_producer(producer)
})
}
/// Constructs a decoder with the given buffer.
///
/// This constructor assumes that an entire FLAC file is in the buffer.
///
/// # Failures
///
/// * `ErrorKind::IO(io::ErrorKind::InvalidData)` is returned when the data
/// within the file isn't valid FLAC data.
/// * Several different parser specific errors that are structured as
/// `ErrorKind::<parser_name>Parser`.
/// * Several different invalidation specific errors that are
/// structured as `ErrorKind::Invalid<invalidation_name>`.
#[inline]
pub fn from_buffer(buffer: &[u8]) -> Result<StreamBuffer, ErrorKind> {
let producer = ByteStream::new(buffer);
Stream::from_stream_producer(producer)
}
fn from_stream_producer(mut producer: P) -> Result<Self, ErrorKind> {
let mut stream_info = Default::default();
let mut metadata = Vec::new();
many_metadata(&mut producer, |block| {
if let metadata::Data::StreamInfo(info) = block.data {
stream_info = info;
} else {
metadata.push(block);
}
}).map(|_| {
Stream {
info: stream_info,
metadata: metadata,
producer: producer,
}
})
}
/// Returns an iterator over the decoded samples.
#[inline]
pub fn iter<S: SampleSize>(&mut self) -> Iter<P, S::Extended> {
let samples_left = self.info.total_samples;
let channels = self.info.channels as usize;
let block_size = self.info.max_block_size as usize;
let buffer_size = block_size * channels;
Iter {
stream: self,
channel: 0,
block_size: 0,
sample_index: 0,
samples_left: samples_left,
buffer: vec![S::Extended::from_i8(0); buffer_size]
}
}
fn next_frame<S>(&mut self, buffer: &mut [S]) -> Option<usize>
where S: Sample {
let stream_info = &self.info;
loop {
match self.producer.parse(|i| frame_parser(i, stream_info, buffer)) {
Ok(frame) => {
let channels = frame.header.channels as usize;
let block_size = frame.header.block_size as usize;
let subframes = frame.subframes[0..channels].iter();
for (channel, subframe) in subframes.enumerate() {
let start = channel * block_size;
let end = (channel + 1) * block_size;
let output = &mut buffer[start..end];
subframe::decode(&subframe, block_size, output);
}
frame::decode(frame.header.channel_assignment, buffer);
return Some(block_size);
}
Err(ErrorKind::Continue) => continue,
Err(_) => return None,
}
}
}
}
/// An iterator over a reference of the decoded FLAC stream.
pub struct Iter<'a, P, S>
where P: 'a + StreamProducer,
S: Sample{
stream: &'a mut Stream<P>,
channel: usize,
block_size: usize,
sample_index: usize,
samples_left: u64,
buffer: Vec<S>,
}
impl<'a, P, S> Iterator for Iter<'a, P, S>
where P: StreamProducer,
S: Sample {
type Item = S::Normal;
fn next(&mut self) -> Option<Self::Item> {
if self.sample_index == self.block_size {
let buffer = &mut self.buffer;
if let Some(block_size) = self.stream.next_frame(buffer) {
self.sample_index = 0;
self.block_size = block_size;
} else {
return None;
}
}
let channels = self.stream.info.channels as usize;
let index = self.sample_index + (self.channel * self.block_size);
let sample = unsafe { *self.buffer.get_unchecked(index) };
self.channel += 1;
// Reset current channel
if self.channel == channels {
self.channel = 0;
self.sample_index += 1;
self.samples_left -= 1;
}
S::to_normal(sample)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let samples_left = self.samples_left as usize;
let max_value = usize::max_value() as u64;
// There is a chance that samples_left will be larger than a usize since
// it is a u64. Make the upper bound None when it is.
if self.samples_left > max_value {
(samples_left, None)
} else {
(samples_left, Some(samples_left))
}
}
}
//impl<'a, P, S> IntoIterator for &'a mut Stream<P>
// where P: StreamProducer,
// S: Sample {
// type Item = S::Normal;
// type IntoIter = Iter<'a, P, S>;
//
// #[inline]
// fn into_iter(self) -> Self::IntoIter {
// self.iter()
// }
//}
| {
let producer = ReadStream::new(reader);
Stream::from_stream_producer(producer)
} | identifier_body |
stream.rs | use metadata;
use frame;
use subframe;
use metadata::{Metadata, StreamInfo};
use frame::frame_parser;
use utility::{
ErrorKind, ByteStream, ReadStream, Sample, SampleSize, StreamProducer,
many_metadata,
};
use std::io;
use std::usize;
use std::fs::File;
/// FLAC stream that decodes and hold file information.
pub struct Stream<P: StreamProducer> {
info: StreamInfo,
metadata: Vec<Metadata>,
producer: P,
}
/// Alias for a FLAC stream produced from `Read`.
pub type StreamReader<R> = Stream<ReadStream<R>>;
/// Alias for a FLAC stream produced from a byte stream buffer.
pub type StreamBuffer<'a> = Stream<ByteStream<'a>>;
impl<P> Stream<P> where P: StreamProducer {
/// Constructor for the default state of a FLAC stream.
#[inline]
pub fn new<R: io::Read>(reader: R) -> Result<StreamReader<R>, ErrorKind> {
let producer = ReadStream::new(reader);
Stream::from_stream_producer(producer)
}
/// Returns information for the current stream.
#[inline]
pub fn info(&self) -> StreamInfo {
self.info
}
/// Returns a slice of `Metadata`
///
/// This slice excludes `StreamInfo`, which is located in `Stream::info`.
/// Everything else is related to metadata for the FLAC stream is in the
/// slice.
#[inline]
pub fn metadata(&self) -> &[Metadata] {
&self.metadata
}
/// Constructs a decoder with the given file name.
///
/// # Failures
///
/// * `ErrorKind::IO(io::ErrorKind::NotFound)` is returned when the given
/// filename isn't found.
/// * `ErrorKind::IO(io::ErrorKind::InvalidData)` is returned when the data
/// within the file isn't valid FLAC data.
/// * Several different parser specific errors that are structured as
/// `ErrorKind::<parser_name>Parser`.
/// * Several different invalidation specific errors that are
/// structured as `ErrorKind::Invalid<invalidation_name>`.
#[inline]
pub fn from_file(filename: &str) -> Result<StreamReader<File>, ErrorKind> {
File::open(filename).map_err(|e| ErrorKind::IO(e.kind()))
.and_then(|file| {
let producer = ReadStream::new(file);
Stream::from_stream_producer(producer)
})
}
/// Constructs a decoder with the given buffer.
///
/// This constructor assumes that an entire FLAC file is in the buffer.
///
/// # Failures
///
/// * `ErrorKind::IO(io::ErrorKind::InvalidData)` is returned when the data
/// within the file isn't valid FLAC data.
/// * Several different parser specific errors that are structured as
/// `ErrorKind::<parser_name>Parser`.
/// * Several different invalidation specific errors that are
/// structured as `ErrorKind::Invalid<invalidation_name>`.
#[inline]
pub fn from_buffer(buffer: &[u8]) -> Result<StreamBuffer, ErrorKind> {
let producer = ByteStream::new(buffer);
Stream::from_stream_producer(producer)
}
fn from_stream_producer(mut producer: P) -> Result<Self, ErrorKind> {
let mut stream_info = Default::default();
let mut metadata = Vec::new(); | stream_info = info;
} else {
metadata.push(block);
}
}).map(|_| {
Stream {
info: stream_info,
metadata: metadata,
producer: producer,
}
})
}
/// Returns an iterator over the decoded samples.
#[inline]
pub fn iter<S: SampleSize>(&mut self) -> Iter<P, S::Extended> {
let samples_left = self.info.total_samples;
let channels = self.info.channels as usize;
let block_size = self.info.max_block_size as usize;
let buffer_size = block_size * channels;
Iter {
stream: self,
channel: 0,
block_size: 0,
sample_index: 0,
samples_left: samples_left,
buffer: vec![S::Extended::from_i8(0); buffer_size]
}
}
fn next_frame<S>(&mut self, buffer: &mut [S]) -> Option<usize>
where S: Sample {
let stream_info = &self.info;
loop {
match self.producer.parse(|i| frame_parser(i, stream_info, buffer)) {
Ok(frame) => {
let channels = frame.header.channels as usize;
let block_size = frame.header.block_size as usize;
let subframes = frame.subframes[0..channels].iter();
for (channel, subframe) in subframes.enumerate() {
let start = channel * block_size;
let end = (channel + 1) * block_size;
let output = &mut buffer[start..end];
subframe::decode(&subframe, block_size, output);
}
frame::decode(frame.header.channel_assignment, buffer);
return Some(block_size);
}
Err(ErrorKind::Continue) => continue,
Err(_) => return None,
}
}
}
}
/// An iterator over a reference of the decoded FLAC stream.
pub struct Iter<'a, P, S>
where P: 'a + StreamProducer,
S: Sample{
stream: &'a mut Stream<P>,
channel: usize,
block_size: usize,
sample_index: usize,
samples_left: u64,
buffer: Vec<S>,
}
impl<'a, P, S> Iterator for Iter<'a, P, S>
where P: StreamProducer,
S: Sample {
type Item = S::Normal;
fn next(&mut self) -> Option<Self::Item> {
if self.sample_index == self.block_size {
let buffer = &mut self.buffer;
if let Some(block_size) = self.stream.next_frame(buffer) {
self.sample_index = 0;
self.block_size = block_size;
} else {
return None;
}
}
let channels = self.stream.info.channels as usize;
let index = self.sample_index + (self.channel * self.block_size);
let sample = unsafe { *self.buffer.get_unchecked(index) };
self.channel += 1;
// Reset current channel
if self.channel == channels {
self.channel = 0;
self.sample_index += 1;
self.samples_left -= 1;
}
S::to_normal(sample)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let samples_left = self.samples_left as usize;
let max_value = usize::max_value() as u64;
// There is a chance that samples_left will be larger than a usize since
// it is a u64. Make the upper bound None when it is.
if self.samples_left > max_value {
(samples_left, None)
} else {
(samples_left, Some(samples_left))
}
}
}
//impl<'a, P, S> IntoIterator for &'a mut Stream<P>
// where P: StreamProducer,
// S: Sample {
// type Item = S::Normal;
// type IntoIter = Iter<'a, P, S>;
//
// #[inline]
// fn into_iter(self) -> Self::IntoIter {
// self.iter()
// }
//} |
many_metadata(&mut producer, |block| {
if let metadata::Data::StreamInfo(info) = block.data { | random_line_split |
qtserialport_uk.ts | <?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="uk_UA">
<context>
<name>QSerialPort</name>
<message>
<source>No error</source>
<translation>Немає помилки</translation>
</message>
<message>
<source>Device is already open</source>
<translation>Пристрій вже відкрито</translation>
</message>
<message>
<source>Device is not open</source>
<translation>Пристрій не відкрито</translation>
</message>
<message>
<source>Operation timed out</source>
<translation>Час очікування на операцію вичерпано</translation>
</message>
<message>
<source>Error reading from device</source>
<translation>Помилка читання з пристрою</translation>
</message>
<message>
<source>Error writing to device</source>
<translation>Помилка запису до пристрою</translation>
</message>
<message>
<source>Device disappeared from the system</source>
<translation>Пристрій зник із системи</translation>
</message>
<message>
<source>Unsupported open mode</source>
<translation>Непідтримуваний режим відкриття</translation>
</message>
<message>
<source>The device supports only the ignoring policy</source>
<translation>Пристрій підтримує лише політику виправлення помилок "ігнорувати"</translation> | <translation>Помилка доступу під час створення файлу блокування</translation>
</message>
<message>
<source>Permission error while locking the device</source>
<translation>Помилка доступу під час блокування пристрою</translation>
</message>
<message>
<source>Cannot set custom speed for one direction</source>
<translation>Неможливо встановити власну швидкість лише для одного напрямку</translation>
</message>
<message>
<source>No suitable custom baud rate divisor</source>
<translation>Немає відповідного дільника для власної швидкості передавання</translation>
</message>
<message>
<source>Custom baud rate is not supported</source>
<translation>Власна швидкість передавання не підтримується</translation>
</message>
<message>
<source>Invalid baud rate value</source>
<translation>Неправильне значення швидкості передавання</translation>
</message>
<message>
<source>Custom baud rate direction is unsupported</source>
<translation>Власна швидкість передавання для одного напрямку не підтримується</translation>
</message>
</context>
</TS> | </message>
<message>
<source>Permission error while creating lock file</source> | random_line_split |
glutin.rs | use glium::{
Frame,
IndexBuffer,
Surface,
VertexBuffer,
glutin,
program,
texture,
};
use glium::index::PrimitiveType;
use glium::backend::glutin_backend::GlutinFacade;
use super::super::gamestate::*;
use super::sound::{
Player,
PortAudio,
};
#[derive(Copy, Clone)]
// Non-snake case names mandated by shaders
#[allow(non_snake_case)]
struct Vertex {
InVertex: [f32; 3],
InTexCoord0: [f32; 2]
}
implement_vertex!(Vertex, InVertex, InTexCoord0);
// clockwise order
const SQUARE_VERTICES: [Vertex; 4] = [
Vertex {
InVertex: [ 0.5f32, 0.5f32, 0.0f32],
InTexCoord0: [1.0f32, 1.0f32]
},
Vertex {
InVertex: [ 0.5f32,-0.5f32, 0.0f32],
InTexCoord0: [1.0f32, 0.0f32]
},
Vertex {
InVertex: [-0.5f32,-0.5f32, 0.0f32],
InTexCoord0: [0.0f32, 0.0f32]
},
Vertex {
InVertex: [-0.5f32, 0.5f32, 0.0f32],
InTexCoord0: [0.0f32, 1.0f32]
},
];
const SQUARE_INDICES: [u16; 6] = [
0,3,1,
2,1,3
];
/// Runs tetris in a GL context managed by the glium library
pub fn run_tetris() {
use super::RenderData;
use glium::DisplayBuild;
use std::sync::{Arc,Mutex,MutexGuard};
use std::cell::Ref;
use std::convert::From;
const WIDTH: u32 = 400;
const HEIGHT: u32 = 800;
let display = glutin::WindowBuilder::new()
.with_dimensions(WIDTH, HEIGHT)
.build_glium()
.unwrap();
let vertex_buffer = VertexBuffer::new(&display, &SQUARE_VERTICES).unwrap();
let index_buffer = IndexBuffer::new(&display, PrimitiveType::TrianglesList, &SQUARE_INDICES).unwrap();
let program = load_shaders(&display).unwrap();
let image = gen_image();
let texture = texture::Texture2d::new(&display, image).unwrap();
let data_mutex: Arc<Mutex<Option<RenderData>>> = Arc::new(Mutex::new(None));
let mut game: game::Game;
{
let data_mutex = data_mutex.clone();
let callback = move |field: Ref<field::Field>, current: &piece::Piece, ghost: Option<&piece::Piece>| {
let new_data = RenderData {
current: From::from(current),
field: field.clone(),
ghost: match ghost {
Some(piece) => Some(From::from(piece)),
None => None
},
};
let mut data: MutexGuard<Option<super::RenderData>> = data_mutex.lock().unwrap();
*data = Some(new_data);
};
game = game::Game::new(Box::new(callback));
}
game.run().unwrap();
let pa = PortAudio::new().unwrap();
let mut player = Player::new();
player.start_music(&pa).unwrap();
loop {
for ev in display.poll_events() {
match ev {
glutin::Event::Closed => return,
glutin::Event::KeyboardInput(state, byte, opt) => key_input(&mut game, &pa, &mut player, state, byte, opt),
_ => ()
}
}
let data_opt: Option<RenderData> = data_mutex.lock().unwrap().clone();
if let Some(data) = data_opt {
draw_frame(&display, data, &vertex_buffer, &index_buffer, &program, &texture);
}
if game.is_game_over() {
player.reset_music().unwrap();
}
}
}
fn load_shaders(display: &GlutinFacade) -> Result<program::Program, program::ProgramCreationError> {
use std::fs::*;
use std::io::Read;
use std::path::Path;
const VERT_PATH: &'static str = "shaders/simple.vert";
const FRAG_PATH: &'static str = "shaders/default.frag";
const PROJECT_ROOT: &'static str = "../../";
let vert_path = Path::new(PROJECT_ROOT).join(Path::new(VERT_PATH));
let frag_path = Path::new(PROJECT_ROOT).join(Path::new( FRAG_PATH));
let mut vertex_file: File = File::open(vert_path.clone()).expect(&format!("couldn't open {:?}", vert_path));
let mut frag_file: File = File::open(frag_path.clone()).expect(&format!("couldn't open {:?}", frag_path));
let mut vertex_str = String::new();
let mut frag_str = String::new();
vertex_file.read_to_string(&mut vertex_str).expect(&format!("couldn't read vertex_file {:?}", vertex_file));
frag_file.read_to_string(&mut frag_str).expect(&format!("couldn't read frag_file {:?}", frag_file));
program::Program::from_source(display, &vertex_str, &frag_str, None)
}
fn draw_block<V:Copy>(target: &mut Frame,
v_buff: &VertexBuffer<V>, i_buff: &IndexBuffer<u16>,
program: &program::Program,
offset: (f32, f32), color: (f32, f32, f32, f32), tex: &texture::Texture2d) -> ()
{
let uniforms = uniform! {
offset: offset,
tint: color,
msampler: tex,
};
target.draw(v_buff, i_buff, program, &uniforms, &Default::default()).unwrap();
}
fn draw_frame<V: Copy>(display: &GlutinFacade,
data: super::RenderData,
v_buff: &VertexBuffer<V>, i_buff: &IndexBuffer<u16>,
program: &program::Program,
tex: &texture::Texture2d) -> ()
{
use super::super::gamestate::field;
use super::super::gamestate::piece::Type;
use super::super::gamestate::Coord;
//const WHITE_COLOR: (f32, f32, f32, f32) = (1.0f32, 1.0f32, 1.0f32, 1.0f32);
const GREY_COLOR: (f32, f32, f32, f32) = (0.6f32, 0.6f32, 0.6f32, 0.6f32);
const I_COLOR: (f32, f32, f32, f32) = (1.0f32, 0.0f32, 0.0f32, 1.0f32);
const J_COLOR: (f32, f32, f32, f32) = (0.0f32, 1.0f32, 0.0f32, 1.0f32);
const L_COLOR: (f32, f32, f32, f32) = (0.0f32, 0.0f32, 1.0f32, 1.0f32);
const O_COLOR: (f32, f32, f32, f32) = (1.0f32, 1.0f32, 0.0f32, 1.0f32);
const S_COLOR: (f32, f32, f32, f32) = (1.0f32, 0.0f32, 1.0f32, 1.0f32);
const T_COLOR: (f32, f32, f32, f32) = (0.0f32, 1.0f32, 1.0f32, 1.0f32);
const Z_COLOR: (f32, f32, f32, f32) = (0.5f32, 1.0f32, 0.5f32, 1.0f32);
let mut frame: Frame = display.draw();
frame.clear_color(0.0, 0.0, 0.0, 1.0);
// Draw blocks in field
for i in 0..field::WIDTH {
for j in 0..field::HEIGHT {
if data.field.get(Coord::new(i as i32, j as i32)).unwrap() {
draw_block(&mut frame, v_buff, i_buff, program,
(i as f32, j as f32),
GREY_COLOR, tex);
}
}
}
// Draw current piece
let color = match data.current.typ {
Type::I => I_COLOR,
Type::J => J_COLOR,
Type::L => L_COLOR,
Type::O => O_COLOR,
Type::S => S_COLOR,
Type::T => T_COLOR,
Type::Z => Z_COLOR
};
for block in &data.current.blocks {
draw_block(&mut frame, v_buff, i_buff, program,
(block.x as f32, block.y as f32),
color, tex);
}
frame.finish().unwrap();
}
fn | () -> texture::RawImage2d<'static, (f32, f32, f32)> {
use std::cmp::min;
use std::mem;
use glium::texture::ClientFormat;
const TEXDIMENSION: u32 = 256;
const TEXBUFFSIZE: usize = (TEXDIMENSION*TEXDIMENSION) as usize;
let mut raw_data: Vec<(f32, f32, f32)> = Vec::new();
for i in 0..TEXDIMENSION {
for j in 0..TEXDIMENSION {
let idist = min(TEXDIMENSION-i, i);
let jdist = min(TEXDIMENSION-j, j);
let dist = min(idist, jdist);
let value: f32 = (dist as f32) / (TEXDIMENSION as f32) + 0.5f32;
raw_data.push((value, value, value));
}
}
assert_eq!(raw_data.len(), TEXBUFFSIZE);
let mut image = texture::RawImage2d::from_raw_rgb(raw_data, (TEXDIMENSION, TEXDIMENSION));
match image.format {
ClientFormat::F32F32F32 => (),
_ => {
println!("correcting wrong format: {:?}", image.format);
image.format = ClientFormat::F32F32F32;
}
}
assert!(image.data.len() == image.width as usize * image.height as usize * image.format.get_size() / mem::size_of::<(f32, f32, f32)>(),
"size mismatch: len {:?}, width {:?}, height {:?}, get_size() {:?}, size_of {:?}",
image.data.len(), image.width, image.height, image.format.get_size(), mem::size_of::<(f32, f32, f32)>());
image
}
fn key_input<'pa>(game: &mut game::Game, pa: &'pa PortAudio, player: &mut Player<'pa>, state: glutin::ElementState, _: u8, opt: Option<glutin::VirtualKeyCode>) -> () {
use glium::glutin::VirtualKeyCode;
use gamestate::piece::Input;
let input: Option<Input>;
if state == glutin::ElementState::Pressed {
if let Some(code) = opt {
input = match code {
VirtualKeyCode::Down => Some(Input::HardDrop),
VirtualKeyCode::E => Some(Input::RotateCW),
VirtualKeyCode::Left => Some(Input::ShiftLeft),
VirtualKeyCode::Q => Some(Input::RotateCCW),
VirtualKeyCode::Return => Some(Input::HardDrop),
VirtualKeyCode::Right => Some(Input::ShiftRight),
VirtualKeyCode::P => {
game.pause().or_else(|_|{game.run()}).expect("If pause fails run should always succeed");
player.toggle_play_music(pa).unwrap();
None
},
_ => None
}
} else {
input = None;
}
} else {
input = None;
}
if let Some(input) = input {
let _ = game.queue_input(input);
}
}
| gen_image | identifier_name |
glutin.rs | use glium::{
Frame,
IndexBuffer,
Surface,
VertexBuffer,
glutin,
program,
texture,
};
use glium::index::PrimitiveType;
use glium::backend::glutin_backend::GlutinFacade;
use super::super::gamestate::*;
use super::sound::{
Player,
PortAudio,
};
#[derive(Copy, Clone)]
// Non-snake case names mandated by shaders
#[allow(non_snake_case)]
struct Vertex {
InVertex: [f32; 3],
InTexCoord0: [f32; 2]
}
implement_vertex!(Vertex, InVertex, InTexCoord0);
// clockwise order
const SQUARE_VERTICES: [Vertex; 4] = [
Vertex {
InVertex: [ 0.5f32, 0.5f32, 0.0f32],
InTexCoord0: [1.0f32, 1.0f32]
},
Vertex {
InVertex: [ 0.5f32,-0.5f32, 0.0f32],
InTexCoord0: [1.0f32, 0.0f32]
},
Vertex {
InVertex: [-0.5f32,-0.5f32, 0.0f32],
InTexCoord0: [0.0f32, 0.0f32]
},
Vertex {
InVertex: [-0.5f32, 0.5f32, 0.0f32],
InTexCoord0: [0.0f32, 1.0f32]
},
];
const SQUARE_INDICES: [u16; 6] = [
0,3,1,
2,1,3
];
/// Runs tetris in a GL context managed by the glium library
pub fn run_tetris() {
use super::RenderData;
use glium::DisplayBuild;
use std::sync::{Arc,Mutex,MutexGuard};
use std::cell::Ref;
use std::convert::From;
const WIDTH: u32 = 400;
const HEIGHT: u32 = 800;
let display = glutin::WindowBuilder::new()
.with_dimensions(WIDTH, HEIGHT)
.build_glium()
.unwrap();
let vertex_buffer = VertexBuffer::new(&display, &SQUARE_VERTICES).unwrap();
let index_buffer = IndexBuffer::new(&display, PrimitiveType::TrianglesList, &SQUARE_INDICES).unwrap();
let program = load_shaders(&display).unwrap();
let image = gen_image();
let texture = texture::Texture2d::new(&display, image).unwrap();
let data_mutex: Arc<Mutex<Option<RenderData>>> = Arc::new(Mutex::new(None));
let mut game: game::Game;
{
let data_mutex = data_mutex.clone();
let callback = move |field: Ref<field::Field>, current: &piece::Piece, ghost: Option<&piece::Piece>| {
let new_data = RenderData {
current: From::from(current),
field: field.clone(),
ghost: match ghost {
Some(piece) => Some(From::from(piece)),
None => None
},
};
let mut data: MutexGuard<Option<super::RenderData>> = data_mutex.lock().unwrap();
*data = Some(new_data);
};
game = game::Game::new(Box::new(callback));
}
game.run().unwrap();
let pa = PortAudio::new().unwrap();
let mut player = Player::new();
player.start_music(&pa).unwrap();
loop {
for ev in display.poll_events() {
match ev {
glutin::Event::Closed => return,
glutin::Event::KeyboardInput(state, byte, opt) => key_input(&mut game, &pa, &mut player, state, byte, opt),
_ => ()
}
}
let data_opt: Option<RenderData> = data_mutex.lock().unwrap().clone();
if let Some(data) = data_opt {
draw_frame(&display, data, &vertex_buffer, &index_buffer, &program, &texture);
}
if game.is_game_over() {
player.reset_music().unwrap();
}
}
}
fn load_shaders(display: &GlutinFacade) -> Result<program::Program, program::ProgramCreationError> {
use std::fs::*;
use std::io::Read;
use std::path::Path;
const VERT_PATH: &'static str = "shaders/simple.vert";
const FRAG_PATH: &'static str = "shaders/default.frag";
const PROJECT_ROOT: &'static str = "../../";
let vert_path = Path::new(PROJECT_ROOT).join(Path::new(VERT_PATH));
let frag_path = Path::new(PROJECT_ROOT).join(Path::new( FRAG_PATH));
let mut vertex_file: File = File::open(vert_path.clone()).expect(&format!("couldn't open {:?}", vert_path));
let mut frag_file: File = File::open(frag_path.clone()).expect(&format!("couldn't open {:?}", frag_path));
let mut vertex_str = String::new();
let mut frag_str = String::new();
vertex_file.read_to_string(&mut vertex_str).expect(&format!("couldn't read vertex_file {:?}", vertex_file));
frag_file.read_to_string(&mut frag_str).expect(&format!("couldn't read frag_file {:?}", frag_file));
program::Program::from_source(display, &vertex_str, &frag_str, None)
}
fn draw_block<V:Copy>(target: &mut Frame,
v_buff: &VertexBuffer<V>, i_buff: &IndexBuffer<u16>,
program: &program::Program,
offset: (f32, f32), color: (f32, f32, f32, f32), tex: &texture::Texture2d) -> ()
{
let uniforms = uniform! {
offset: offset,
tint: color,
msampler: tex,
};
target.draw(v_buff, i_buff, program, &uniforms, &Default::default()).unwrap();
}
fn draw_frame<V: Copy>(display: &GlutinFacade,
data: super::RenderData,
v_buff: &VertexBuffer<V>, i_buff: &IndexBuffer<u16>,
program: &program::Program,
tex: &texture::Texture2d) -> ()
{
use super::super::gamestate::field;
use super::super::gamestate::piece::Type;
use super::super::gamestate::Coord;
//const WHITE_COLOR: (f32, f32, f32, f32) = (1.0f32, 1.0f32, 1.0f32, 1.0f32);
const GREY_COLOR: (f32, f32, f32, f32) = (0.6f32, 0.6f32, 0.6f32, 0.6f32);
const I_COLOR: (f32, f32, f32, f32) = (1.0f32, 0.0f32, 0.0f32, 1.0f32);
const J_COLOR: (f32, f32, f32, f32) = (0.0f32, 1.0f32, 0.0f32, 1.0f32);
const L_COLOR: (f32, f32, f32, f32) = (0.0f32, 0.0f32, 1.0f32, 1.0f32);
const O_COLOR: (f32, f32, f32, f32) = (1.0f32, 1.0f32, 0.0f32, 1.0f32);
const S_COLOR: (f32, f32, f32, f32) = (1.0f32, 0.0f32, 1.0f32, 1.0f32);
const T_COLOR: (f32, f32, f32, f32) = (0.0f32, 1.0f32, 1.0f32, 1.0f32);
const Z_COLOR: (f32, f32, f32, f32) = (0.5f32, 1.0f32, 0.5f32, 1.0f32);
let mut frame: Frame = display.draw();
frame.clear_color(0.0, 0.0, 0.0, 1.0);
// Draw blocks in field
for i in 0..field::WIDTH {
for j in 0..field::HEIGHT {
if data.field.get(Coord::new(i as i32, j as i32)).unwrap() {
draw_block(&mut frame, v_buff, i_buff, program,
(i as f32, j as f32),
GREY_COLOR, tex);
}
}
}
// Draw current piece
let color = match data.current.typ {
Type::I => I_COLOR,
Type::J => J_COLOR,
Type::L => L_COLOR,
Type::O => O_COLOR,
Type::S => S_COLOR,
Type::T => T_COLOR,
Type::Z => Z_COLOR
};
for block in &data.current.blocks {
draw_block(&mut frame, v_buff, i_buff, program,
(block.x as f32, block.y as f32),
color, tex);
}
frame.finish().unwrap();
}
fn gen_image() -> texture::RawImage2d<'static, (f32, f32, f32)> {
use std::cmp::min;
use std::mem;
use glium::texture::ClientFormat;
const TEXDIMENSION: u32 = 256;
const TEXBUFFSIZE: usize = (TEXDIMENSION*TEXDIMENSION) as usize;
let mut raw_data: Vec<(f32, f32, f32)> = Vec::new();
for i in 0..TEXDIMENSION {
for j in 0..TEXDIMENSION {
let idist = min(TEXDIMENSION-i, i);
let jdist = min(TEXDIMENSION-j, j);
let dist = min(idist, jdist);
let value: f32 = (dist as f32) / (TEXDIMENSION as f32) + 0.5f32;
raw_data.push((value, value, value));
}
}
assert_eq!(raw_data.len(), TEXBUFFSIZE);
let mut image = texture::RawImage2d::from_raw_rgb(raw_data, (TEXDIMENSION, TEXDIMENSION));
match image.format {
ClientFormat::F32F32F32 => (),
_ => |
}
assert!(image.data.len() == image.width as usize * image.height as usize * image.format.get_size() / mem::size_of::<(f32, f32, f32)>(),
"size mismatch: len {:?}, width {:?}, height {:?}, get_size() {:?}, size_of {:?}",
image.data.len(), image.width, image.height, image.format.get_size(), mem::size_of::<(f32, f32, f32)>());
image
}
fn key_input<'pa>(game: &mut game::Game, pa: &'pa PortAudio, player: &mut Player<'pa>, state: glutin::ElementState, _: u8, opt: Option<glutin::VirtualKeyCode>) -> () {
use glium::glutin::VirtualKeyCode;
use gamestate::piece::Input;
let input: Option<Input>;
if state == glutin::ElementState::Pressed {
if let Some(code) = opt {
input = match code {
VirtualKeyCode::Down => Some(Input::HardDrop),
VirtualKeyCode::E => Some(Input::RotateCW),
VirtualKeyCode::Left => Some(Input::ShiftLeft),
VirtualKeyCode::Q => Some(Input::RotateCCW),
VirtualKeyCode::Return => Some(Input::HardDrop),
VirtualKeyCode::Right => Some(Input::ShiftRight),
VirtualKeyCode::P => {
game.pause().or_else(|_|{game.run()}).expect("If pause fails run should always succeed");
player.toggle_play_music(pa).unwrap();
None
},
_ => None
}
} else {
input = None;
}
} else {
input = None;
}
if let Some(input) = input {
let _ = game.queue_input(input);
}
}
| {
println!("correcting wrong format: {:?}", image.format);
image.format = ClientFormat::F32F32F32;
} | conditional_block |
glutin.rs | use glium::{
Frame,
IndexBuffer,
Surface,
VertexBuffer,
glutin,
program,
texture,
};
use glium::index::PrimitiveType;
use glium::backend::glutin_backend::GlutinFacade;
use super::super::gamestate::*;
use super::sound::{
Player,
PortAudio,
};
#[derive(Copy, Clone)]
// Non-snake case names mandated by shaders
#[allow(non_snake_case)]
struct Vertex {
InVertex: [f32; 3],
InTexCoord0: [f32; 2]
}
implement_vertex!(Vertex, InVertex, InTexCoord0);
// clockwise order
const SQUARE_VERTICES: [Vertex; 4] = [
Vertex {
InVertex: [ 0.5f32, 0.5f32, 0.0f32],
InTexCoord0: [1.0f32, 1.0f32]
},
Vertex {
InVertex: [ 0.5f32,-0.5f32, 0.0f32],
InTexCoord0: [1.0f32, 0.0f32]
},
Vertex {
InVertex: [-0.5f32,-0.5f32, 0.0f32],
InTexCoord0: [0.0f32, 0.0f32]
},
Vertex {
InVertex: [-0.5f32, 0.5f32, 0.0f32],
InTexCoord0: [0.0f32, 1.0f32]
},
];
const SQUARE_INDICES: [u16; 6] = [
0,3,1,
2,1,3
];
/// Runs tetris in a GL context managed by the glium library
pub fn run_tetris() {
use super::RenderData;
use glium::DisplayBuild;
use std::sync::{Arc,Mutex,MutexGuard};
use std::cell::Ref;
use std::convert::From;
const WIDTH: u32 = 400;
const HEIGHT: u32 = 800;
let display = glutin::WindowBuilder::new()
.with_dimensions(WIDTH, HEIGHT)
.build_glium()
.unwrap();
let vertex_buffer = VertexBuffer::new(&display, &SQUARE_VERTICES).unwrap();
let index_buffer = IndexBuffer::new(&display, PrimitiveType::TrianglesList, &SQUARE_INDICES).unwrap();
let program = load_shaders(&display).unwrap(); | {
let data_mutex = data_mutex.clone();
let callback = move |field: Ref<field::Field>, current: &piece::Piece, ghost: Option<&piece::Piece>| {
let new_data = RenderData {
current: From::from(current),
field: field.clone(),
ghost: match ghost {
Some(piece) => Some(From::from(piece)),
None => None
},
};
let mut data: MutexGuard<Option<super::RenderData>> = data_mutex.lock().unwrap();
*data = Some(new_data);
};
game = game::Game::new(Box::new(callback));
}
game.run().unwrap();
let pa = PortAudio::new().unwrap();
let mut player = Player::new();
player.start_music(&pa).unwrap();
loop {
for ev in display.poll_events() {
match ev {
glutin::Event::Closed => return,
glutin::Event::KeyboardInput(state, byte, opt) => key_input(&mut game, &pa, &mut player, state, byte, opt),
_ => ()
}
}
let data_opt: Option<RenderData> = data_mutex.lock().unwrap().clone();
if let Some(data) = data_opt {
draw_frame(&display, data, &vertex_buffer, &index_buffer, &program, &texture);
}
if game.is_game_over() {
player.reset_music().unwrap();
}
}
}
fn load_shaders(display: &GlutinFacade) -> Result<program::Program, program::ProgramCreationError> {
use std::fs::*;
use std::io::Read;
use std::path::Path;
const VERT_PATH: &'static str = "shaders/simple.vert";
const FRAG_PATH: &'static str = "shaders/default.frag";
const PROJECT_ROOT: &'static str = "../../";
let vert_path = Path::new(PROJECT_ROOT).join(Path::new(VERT_PATH));
let frag_path = Path::new(PROJECT_ROOT).join(Path::new( FRAG_PATH));
let mut vertex_file: File = File::open(vert_path.clone()).expect(&format!("couldn't open {:?}", vert_path));
let mut frag_file: File = File::open(frag_path.clone()).expect(&format!("couldn't open {:?}", frag_path));
let mut vertex_str = String::new();
let mut frag_str = String::new();
vertex_file.read_to_string(&mut vertex_str).expect(&format!("couldn't read vertex_file {:?}", vertex_file));
frag_file.read_to_string(&mut frag_str).expect(&format!("couldn't read frag_file {:?}", frag_file));
program::Program::from_source(display, &vertex_str, &frag_str, None)
}
fn draw_block<V:Copy>(target: &mut Frame,
v_buff: &VertexBuffer<V>, i_buff: &IndexBuffer<u16>,
program: &program::Program,
offset: (f32, f32), color: (f32, f32, f32, f32), tex: &texture::Texture2d) -> ()
{
let uniforms = uniform! {
offset: offset,
tint: color,
msampler: tex,
};
target.draw(v_buff, i_buff, program, &uniforms, &Default::default()).unwrap();
}
fn draw_frame<V: Copy>(display: &GlutinFacade,
data: super::RenderData,
v_buff: &VertexBuffer<V>, i_buff: &IndexBuffer<u16>,
program: &program::Program,
tex: &texture::Texture2d) -> ()
{
use super::super::gamestate::field;
use super::super::gamestate::piece::Type;
use super::super::gamestate::Coord;
//const WHITE_COLOR: (f32, f32, f32, f32) = (1.0f32, 1.0f32, 1.0f32, 1.0f32);
const GREY_COLOR: (f32, f32, f32, f32) = (0.6f32, 0.6f32, 0.6f32, 0.6f32);
const I_COLOR: (f32, f32, f32, f32) = (1.0f32, 0.0f32, 0.0f32, 1.0f32);
const J_COLOR: (f32, f32, f32, f32) = (0.0f32, 1.0f32, 0.0f32, 1.0f32);
const L_COLOR: (f32, f32, f32, f32) = (0.0f32, 0.0f32, 1.0f32, 1.0f32);
const O_COLOR: (f32, f32, f32, f32) = (1.0f32, 1.0f32, 0.0f32, 1.0f32);
const S_COLOR: (f32, f32, f32, f32) = (1.0f32, 0.0f32, 1.0f32, 1.0f32);
const T_COLOR: (f32, f32, f32, f32) = (0.0f32, 1.0f32, 1.0f32, 1.0f32);
const Z_COLOR: (f32, f32, f32, f32) = (0.5f32, 1.0f32, 0.5f32, 1.0f32);
let mut frame: Frame = display.draw();
frame.clear_color(0.0, 0.0, 0.0, 1.0);
// Draw blocks in field
for i in 0..field::WIDTH {
for j in 0..field::HEIGHT {
if data.field.get(Coord::new(i as i32, j as i32)).unwrap() {
draw_block(&mut frame, v_buff, i_buff, program,
(i as f32, j as f32),
GREY_COLOR, tex);
}
}
}
// Draw current piece
let color = match data.current.typ {
Type::I => I_COLOR,
Type::J => J_COLOR,
Type::L => L_COLOR,
Type::O => O_COLOR,
Type::S => S_COLOR,
Type::T => T_COLOR,
Type::Z => Z_COLOR
};
for block in &data.current.blocks {
draw_block(&mut frame, v_buff, i_buff, program,
(block.x as f32, block.y as f32),
color, tex);
}
frame.finish().unwrap();
}
fn gen_image() -> texture::RawImage2d<'static, (f32, f32, f32)> {
use std::cmp::min;
use std::mem;
use glium::texture::ClientFormat;
const TEXDIMENSION: u32 = 256;
const TEXBUFFSIZE: usize = (TEXDIMENSION*TEXDIMENSION) as usize;
let mut raw_data: Vec<(f32, f32, f32)> = Vec::new();
for i in 0..TEXDIMENSION {
for j in 0..TEXDIMENSION {
let idist = min(TEXDIMENSION-i, i);
let jdist = min(TEXDIMENSION-j, j);
let dist = min(idist, jdist);
let value: f32 = (dist as f32) / (TEXDIMENSION as f32) + 0.5f32;
raw_data.push((value, value, value));
}
}
assert_eq!(raw_data.len(), TEXBUFFSIZE);
let mut image = texture::RawImage2d::from_raw_rgb(raw_data, (TEXDIMENSION, TEXDIMENSION));
match image.format {
ClientFormat::F32F32F32 => (),
_ => {
println!("correcting wrong format: {:?}", image.format);
image.format = ClientFormat::F32F32F32;
}
}
assert!(image.data.len() == image.width as usize * image.height as usize * image.format.get_size() / mem::size_of::<(f32, f32, f32)>(),
"size mismatch: len {:?}, width {:?}, height {:?}, get_size() {:?}, size_of {:?}",
image.data.len(), image.width, image.height, image.format.get_size(), mem::size_of::<(f32, f32, f32)>());
image
}
fn key_input<'pa>(game: &mut game::Game, pa: &'pa PortAudio, player: &mut Player<'pa>, state: glutin::ElementState, _: u8, opt: Option<glutin::VirtualKeyCode>) -> () {
use glium::glutin::VirtualKeyCode;
use gamestate::piece::Input;
let input: Option<Input>;
if state == glutin::ElementState::Pressed {
if let Some(code) = opt {
input = match code {
VirtualKeyCode::Down => Some(Input::HardDrop),
VirtualKeyCode::E => Some(Input::RotateCW),
VirtualKeyCode::Left => Some(Input::ShiftLeft),
VirtualKeyCode::Q => Some(Input::RotateCCW),
VirtualKeyCode::Return => Some(Input::HardDrop),
VirtualKeyCode::Right => Some(Input::ShiftRight),
VirtualKeyCode::P => {
game.pause().or_else(|_|{game.run()}).expect("If pause fails run should always succeed");
player.toggle_play_music(pa).unwrap();
None
},
_ => None
}
} else {
input = None;
}
} else {
input = None;
}
if let Some(input) = input {
let _ = game.queue_input(input);
}
} | let image = gen_image();
let texture = texture::Texture2d::new(&display, image).unwrap();
let data_mutex: Arc<Mutex<Option<RenderData>>> = Arc::new(Mutex::new(None));
let mut game: game::Game; | random_line_split |
task-comm-11.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
extern mod extra;
use std::comm;
use std::task;
fn start(c: &comm::Chan<comm::Chan<int>>) {
let (p, ch) = comm::stream();
c.send(ch);
}
pub fn main() {
let (p, ch) = comm::stream();
let child = task::spawn(|| start(&ch) );
let c = p.recv();
} | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | random_line_split |
task-comm-11.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
extern mod extra;
use std::comm;
use std::task;
fn start(c: &comm::Chan<comm::Chan<int>>) {
let (p, ch) = comm::stream();
c.send(ch);
}
pub fn main() | {
let (p, ch) = comm::stream();
let child = task::spawn(|| start(&ch) );
let c = p.recv();
} | identifier_body |
|
task-comm-11.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
extern mod extra;
use std::comm;
use std::task;
fn start(c: &comm::Chan<comm::Chan<int>>) {
let (p, ch) = comm::stream();
c.send(ch);
}
pub fn | () {
let (p, ch) = comm::stream();
let child = task::spawn(|| start(&ch) );
let c = p.recv();
}
| main | identifier_name |
test_exposed.py | """ Check what the `lambdax` module publicly exposes. """
import builtins
from inspect import isbuiltin, ismodule, isclass
from itertools import chain
import operator
from unittest.mock import patch
import lambdax.builtins_as_lambdas
import lambdax.builtins_overridden
from lambdax import x1, x2, x
def _get_exposed(tested_module):
return {name for name, obj in vars(tested_module).items()
if not name.startswith('_') and not ismodule(obj)}
def test_no_builtin_exposed():
for obj in chain(vars(lambdax).values(), vars(lambdax.builtins_overridden).values()):
assert not isbuiltin(obj)
def test_base_exposed():
variables = {'x'} | {'x%d' % i for i in range(1, 10)}
variables |= {v.upper() for v in variables}
special_functions = {'λ', 'is_λ', 'comp', 'circle', 'chaining', 'and_', 'or_', 'if_'}
to_expose = variables | special_functions
exposed = _get_exposed(lambdax.lambda_calculus)
assert to_expose == exposed
def test_operators_exposed():
operators = {name for name, obj in vars(operator).items()
if not name.startswith('_') and not isclass(obj) and not hasattr(builtins, name)}
to_expose = operators.difference(('and_', 'or_', 'xor'))
assert to_expose == _get_exposed(lambdax.operators)
def test_overridden_builtins_exposed():
builtin_names = {name for name, obj in vars(builtins).items()
if name[0].upper() != name[0]}
irrelevant_builtins = {
'input', 'help', 'open',
'copyright', 'license', 'credits',
'compile', 'eval', 'exec', 'execfile', 'runfile',
'classmethod', 'staticmethod', 'property',
'object', 'super',
'globals', 'locals'
}
builtins_to_expose = builtin_names - irrelevant_builtins
to_expose_as_λ = {name + '_λ' for name in builtins_to_expose}
split_exposed_names = (name.split('_') for name in _get_exposed(lambdax.builtins_as_lambdas))
exposed_as_λ = {'%s_%s' % (words[0], words[-1]) for words in split_exposed_names}
assert to_expose_as_λ == exposed_as_λ
assert builtins_to_expose == _get_exposed(lambdax.builtins_overridden)
def test_operators_implementations():
operators = vars(operator)
for name, abstraction in vars(lambdax.operators).items():
initial = operators.get(name)
if initial and isbuiltin(initial):
wrapped = getattr(abstraction, '_λ_constant')
assert wrapped == initial
try:
ref = initial(42, 51)
except TypeError as e:
ref = e.args
try:
res = abstraction(x1, x2)(42, 51)
except TypeError as e:
res = e.args
assert res == ref
def _get_effect(implementation):
output = []
with patch('sys.stdout') as out:
out.side_effect = output.append
try:
res = implementation("42")
except BaseException as e:
res = e.args
return res, output
def _get_method_or_object(obj, meth=''):
return getattr(obj, meth) if meth else obj
def test_overridden_builtins_implementations():
for name in _get_exposed(lambdax.builtins_as_lambdas):
obj, tai | l = name.split('_', 1)
meth = tail[:-2]
original = _get_method_or_object(getattr(builtins, obj), meth)
as_λ = getattr(lambdax.builtins_as_lambdas, name)
overridden = _get_method_or_object(getattr(lambdax.builtins_overridden, obj), meth)
ref, ref_output = _get_effect(original)
expl, expl_output = _get_effect(as_λ(x))
iso, iso_output = _get_effect(overridden)
lbda, lbda_output = _get_effect(overridden(x))
assert lbda_output == iso_output == expl_output == ref_output
try:
assert list(iter(lbda)) == list(iter(iso)) == list(iter(expl)) == list(iter(ref))
except TypeError:
assert lbda == iso == expl == ref
| conditional_block |
|
test_exposed.py | """ Check what the `lambdax` module publicly exposes. """
import builtins
from inspect import isbuiltin, ismodule, isclass
from itertools import chain
import operator
from unittest.mock import patch
import lambdax.builtins_as_lambdas
import lambdax.builtins_overridden
from lambdax import x1, x2, x
def _get_exposed(tested_module):
return {name for name, obj in vars(tested_module).items()
if not name.startswith('_') and not ismodule(obj)}
def test_no_builtin_exposed():
for obj in chain(vars(lambdax).values(), vars(lambdax.builtins_overridden).values()):
assert not isbuiltin(obj) | def test_base_exposed():
variables = {'x'} | {'x%d' % i for i in range(1, 10)}
variables |= {v.upper() for v in variables}
special_functions = {'λ', 'is_λ', 'comp', 'circle', 'chaining', 'and_', 'or_', 'if_'}
to_expose = variables | special_functions
exposed = _get_exposed(lambdax.lambda_calculus)
assert to_expose == exposed
def test_operators_exposed():
operators = {name for name, obj in vars(operator).items()
if not name.startswith('_') and not isclass(obj) and not hasattr(builtins, name)}
to_expose = operators.difference(('and_', 'or_', 'xor'))
assert to_expose == _get_exposed(lambdax.operators)
def test_overridden_builtins_exposed():
builtin_names = {name for name, obj in vars(builtins).items()
if name[0].upper() != name[0]}
irrelevant_builtins = {
'input', 'help', 'open',
'copyright', 'license', 'credits',
'compile', 'eval', 'exec', 'execfile', 'runfile',
'classmethod', 'staticmethod', 'property',
'object', 'super',
'globals', 'locals'
}
builtins_to_expose = builtin_names - irrelevant_builtins
to_expose_as_λ = {name + '_λ' for name in builtins_to_expose}
split_exposed_names = (name.split('_') for name in _get_exposed(lambdax.builtins_as_lambdas))
exposed_as_λ = {'%s_%s' % (words[0], words[-1]) for words in split_exposed_names}
assert to_expose_as_λ == exposed_as_λ
assert builtins_to_expose == _get_exposed(lambdax.builtins_overridden)
def test_operators_implementations():
operators = vars(operator)
for name, abstraction in vars(lambdax.operators).items():
initial = operators.get(name)
if initial and isbuiltin(initial):
wrapped = getattr(abstraction, '_λ_constant')
assert wrapped == initial
try:
ref = initial(42, 51)
except TypeError as e:
ref = e.args
try:
res = abstraction(x1, x2)(42, 51)
except TypeError as e:
res = e.args
assert res == ref
def _get_effect(implementation):
output = []
with patch('sys.stdout') as out:
out.side_effect = output.append
try:
res = implementation("42")
except BaseException as e:
res = e.args
return res, output
def _get_method_or_object(obj, meth=''):
return getattr(obj, meth) if meth else obj
def test_overridden_builtins_implementations():
for name in _get_exposed(lambdax.builtins_as_lambdas):
obj, tail = name.split('_', 1)
meth = tail[:-2]
original = _get_method_or_object(getattr(builtins, obj), meth)
as_λ = getattr(lambdax.builtins_as_lambdas, name)
overridden = _get_method_or_object(getattr(lambdax.builtins_overridden, obj), meth)
ref, ref_output = _get_effect(original)
expl, expl_output = _get_effect(as_λ(x))
iso, iso_output = _get_effect(overridden)
lbda, lbda_output = _get_effect(overridden(x))
assert lbda_output == iso_output == expl_output == ref_output
try:
assert list(iter(lbda)) == list(iter(iso)) == list(iter(expl)) == list(iter(ref))
except TypeError:
assert lbda == iso == expl == ref | random_line_split |
|
test_exposed.py | """ Check what the `lambdax` module publicly exposes. """
import builtins
from inspect import isbuiltin, ismodule, isclass
from itertools import chain
import operator
from unittest.mock import patch
import lambdax.builtins_as_lambdas
import lambdax.builtins_overridden
from lambdax import x1, x2, x
def | (tested_module):
return {name for name, obj in vars(tested_module).items()
if not name.startswith('_') and not ismodule(obj)}
def test_no_builtin_exposed():
for obj in chain(vars(lambdax).values(), vars(lambdax.builtins_overridden).values()):
assert not isbuiltin(obj)
def test_base_exposed():
variables = {'x'} | {'x%d' % i for i in range(1, 10)}
variables |= {v.upper() for v in variables}
special_functions = {'λ', 'is_λ', 'comp', 'circle', 'chaining', 'and_', 'or_', 'if_'}
to_expose = variables | special_functions
exposed = _get_exposed(lambdax.lambda_calculus)
assert to_expose == exposed
def test_operators_exposed():
operators = {name for name, obj in vars(operator).items()
if not name.startswith('_') and not isclass(obj) and not hasattr(builtins, name)}
to_expose = operators.difference(('and_', 'or_', 'xor'))
assert to_expose == _get_exposed(lambdax.operators)
def test_overridden_builtins_exposed():
builtin_names = {name for name, obj in vars(builtins).items()
if name[0].upper() != name[0]}
irrelevant_builtins = {
'input', 'help', 'open',
'copyright', 'license', 'credits',
'compile', 'eval', 'exec', 'execfile', 'runfile',
'classmethod', 'staticmethod', 'property',
'object', 'super',
'globals', 'locals'
}
builtins_to_expose = builtin_names - irrelevant_builtins
to_expose_as_λ = {name + '_λ' for name in builtins_to_expose}
split_exposed_names = (name.split('_') for name in _get_exposed(lambdax.builtins_as_lambdas))
exposed_as_λ = {'%s_%s' % (words[0], words[-1]) for words in split_exposed_names}
assert to_expose_as_λ == exposed_as_λ
assert builtins_to_expose == _get_exposed(lambdax.builtins_overridden)
def test_operators_implementations():
operators = vars(operator)
for name, abstraction in vars(lambdax.operators).items():
initial = operators.get(name)
if initial and isbuiltin(initial):
wrapped = getattr(abstraction, '_λ_constant')
assert wrapped == initial
try:
ref = initial(42, 51)
except TypeError as e:
ref = e.args
try:
res = abstraction(x1, x2)(42, 51)
except TypeError as e:
res = e.args
assert res == ref
def _get_effect(implementation):
output = []
with patch('sys.stdout') as out:
out.side_effect = output.append
try:
res = implementation("42")
except BaseException as e:
res = e.args
return res, output
def _get_method_or_object(obj, meth=''):
return getattr(obj, meth) if meth else obj
def test_overridden_builtins_implementations():
for name in _get_exposed(lambdax.builtins_as_lambdas):
obj, tail = name.split('_', 1)
meth = tail[:-2]
original = _get_method_or_object(getattr(builtins, obj), meth)
as_λ = getattr(lambdax.builtins_as_lambdas, name)
overridden = _get_method_or_object(getattr(lambdax.builtins_overridden, obj), meth)
ref, ref_output = _get_effect(original)
expl, expl_output = _get_effect(as_λ(x))
iso, iso_output = _get_effect(overridden)
lbda, lbda_output = _get_effect(overridden(x))
assert lbda_output == iso_output == expl_output == ref_output
try:
assert list(iter(lbda)) == list(iter(iso)) == list(iter(expl)) == list(iter(ref))
except TypeError:
assert lbda == iso == expl == ref
| _get_exposed | identifier_name |
test_exposed.py | """ Check what the `lambdax` module publicly exposes. """
import builtins
from inspect import isbuiltin, ismodule, isclass
from itertools import chain
import operator
from unittest.mock import patch
import lambdax.builtins_as_lambdas
import lambdax.builtins_overridden
from lambdax import x1, x2, x
def _get_exposed(tested_module):
return {name for name, obj in vars(tested_module).items()
if not name.startswith('_') and not ismodule(obj)}
def test_no_builtin_exposed():
|
def test_base_exposed():
variables = {'x'} | {'x%d' % i for i in range(1, 10)}
variables |= {v.upper() for v in variables}
special_functions = {'λ', 'is_λ', 'comp', 'circle', 'chaining', 'and_', 'or_', 'if_'}
to_expose = variables | special_functions
exposed = _get_exposed(lambdax.lambda_calculus)
assert to_expose == exposed
def test_operators_exposed():
operators = {name for name, obj in vars(operator).items()
if not name.startswith('_') and not isclass(obj) and not hasattr(builtins, name)}
to_expose = operators.difference(('and_', 'or_', 'xor'))
assert to_expose == _get_exposed(lambdax.operators)
def test_overridden_builtins_exposed():
builtin_names = {name for name, obj in vars(builtins).items()
if name[0].upper() != name[0]}
irrelevant_builtins = {
'input', 'help', 'open',
'copyright', 'license', 'credits',
'compile', 'eval', 'exec', 'execfile', 'runfile',
'classmethod', 'staticmethod', 'property',
'object', 'super',
'globals', 'locals'
}
builtins_to_expose = builtin_names - irrelevant_builtins
to_expose_as_λ = {name + '_λ' for name in builtins_to_expose}
split_exposed_names = (name.split('_') for name in _get_exposed(lambdax.builtins_as_lambdas))
exposed_as_λ = {'%s_%s' % (words[0], words[-1]) for words in split_exposed_names}
assert to_expose_as_λ == exposed_as_λ
assert builtins_to_expose == _get_exposed(lambdax.builtins_overridden)
def test_operators_implementations():
operators = vars(operator)
for name, abstraction in vars(lambdax.operators).items():
initial = operators.get(name)
if initial and isbuiltin(initial):
wrapped = getattr(abstraction, '_λ_constant')
assert wrapped == initial
try:
ref = initial(42, 51)
except TypeError as e:
ref = e.args
try:
res = abstraction(x1, x2)(42, 51)
except TypeError as e:
res = e.args
assert res == ref
def _get_effect(implementation):
output = []
with patch('sys.stdout') as out:
out.side_effect = output.append
try:
res = implementation("42")
except BaseException as e:
res = e.args
return res, output
def _get_method_or_object(obj, meth=''):
return getattr(obj, meth) if meth else obj
def test_overridden_builtins_implementations():
for name in _get_exposed(lambdax.builtins_as_lambdas):
obj, tail = name.split('_', 1)
meth = tail[:-2]
original = _get_method_or_object(getattr(builtins, obj), meth)
as_λ = getattr(lambdax.builtins_as_lambdas, name)
overridden = _get_method_or_object(getattr(lambdax.builtins_overridden, obj), meth)
ref, ref_output = _get_effect(original)
expl, expl_output = _get_effect(as_λ(x))
iso, iso_output = _get_effect(overridden)
lbda, lbda_output = _get_effect(overridden(x))
assert lbda_output == iso_output == expl_output == ref_output
try:
assert list(iter(lbda)) == list(iter(iso)) == list(iter(expl)) == list(iter(ref))
except TypeError:
assert lbda == iso == expl == ref
| for obj in chain(vars(lambdax).values(), vars(lambdax.builtins_overridden).values()):
assert not isbuiltin(obj) | identifier_body |
app.js | 'use strict';
angular.module('playgroundApp', [
'playgroundApp.filters',
'playgroundApp.services',
'playgroundApp.directives',
'ngRoute',
'ui.bootstrap',
'ui',
])
.config(function($locationProvider, $routeProvider, $httpProvider,
$dialogProvider) { |
$locationProvider.html5Mode(true);
// TODO: add list of promises to be resolved for injection
// TODO: resolved promises are injected into controller
// TODO: see http://www.youtube.com/watch?v=P6KITGRQujQ
$routeProvider
.when('/playground/', {
templateUrl: '/playground/main.html',
controller: MainController,
})
.when('/playground/p/:project_id/', {
templateUrl: '/playground/project.html',
controller: ProjectController,
reloadOnSearch: false,
});
$httpProvider.interceptors.push('pgHttpInterceptor');
// TODO: test these defaults?
$dialogProvider.options({
backdropFade: true,
modalFade: true,
});
})
.value('ui.config', {
codemirror: {
lineNumbers: true,
matchBrackets: true,
autofocus: true,
undoDepth: 440, // default = 40
}
}); | random_line_split |
|
selections.js | /* globals google: true */
import Ember from 'ember';
const { later } = Ember.run;
const { on, computed, isArray } = Ember;
export default Ember.Mixin.create({
// Stores reference to google DrawingManager instance
_drawingManager: null,
/**
* [selectionsDelay time it takes to remove last selection from the map]
* @type {Number}
*/
selectionsDelay: null,
// Default to all supported mode
selectionsModes: [
'marker',
'circle',
'polygon',
'polyline',
'rectangle'
],
/**
* [_gmapSelectionsModes]
* @param {String} [observes `selectionsModes` binding options]
* @return {[Array]} [Returns array of matched google OverlayType's]
*/
_gmapSelectionsModes: computed('selectionsModes.[]', function() {
const modes = [];
if(isArray(this.get('selectionsModes')) === false) {
Ember.Logger.error('`selectionsModes` property expects an array');
}
const selectionsModes = this.get('selectionsModes').map((dm) => dm.toLowerCase());
if(selectionsModes.indexOf('marker') > -1) {
modes.push(google.maps.drawing.OverlayType.MARKER);
}
if(selectionsModes.indexOf('circle') > -1) {
modes.push(google.maps.drawing.OverlayType.CIRCLE);
}
if(selectionsModes.indexOf('polygon') > -1) {
modes.push(google.maps.drawing.OverlayType.POLYGON);
}
if(selectionsModes.indexOf('polyline') > -1) {
modes.push(google.maps.drawing.OverlayType.POLYLINE);
}
if(selectionsModes.indexOf('rectangle') > -1) {
modes.push(google.maps.drawing.OverlayType.RECTANGLE);
}
return modes;
}),
// Default to controls on top
selectionsPosition: 'top',
/**
* [_gmapSelectionsPosition ]
* @param {String} [observes `selectionsPosition` binding]
* @return {[ControlPosition]} [Returns matching google ControlPosition]
*/
_gmapSelectionsPosition: computed('selectionsPosition', function() {
let pos = 'TOP_CENTER';
if(typeof this.get('selectionsPosition') !== 'string') {
Ember.Logger.error('`selectionsPosition` property expects a string');
}
switch(Ember.String.dasherize(this.get('selectionsPosition').replace('_', '-')).toLowerCase()) {
case 'top-left':
pos = 'TOP_LEFT'; break;
case 'top-right':
pos = 'TOP_RIGHT'; break;
case 'left-top':
pos = 'LEFT_TOP'; break;
case 'right-top':
pos = 'RIGHT_TOP'; break;
case 'left':
pos = 'LEFT_CENTER'; break;
case 'left-center':
pos = 'LEFT_CENTER'; break;
case 'right':
pos = 'RIGHT_CENTER'; break;
case 'right-center':
pos = 'RIGHT_CENTER'; break;
case 'left-bottom':
pos = 'LEFT_BOTTOM'; break;
case 'right-bottom':
pos = 'RIGHT_BOTTOM'; break;
case 'bottom':
pos = 'BOTTOM_CENTER'; break;
case 'bottom-center':
pos = 'BOTTOM_CENTER'; break;
case 'bottom-left':
pos = 'BOTTOM_LEFT'; break;
case 'bottom-right':
pos = 'BOTTOM_RIGHT'; break;
}
return google.maps.ControlPosition[pos];
}),
// Default to no active selection tool
selectionsMode: '',
/**
* [_gmapSelectionsMode]
* @param {String} [observes `selectionsMode` binding]
* @return {[OverlayType|null]} [Returns matching google OverlayType]
*/
_gmapSelectionsMode: computed('selectionsMode', function() {
let mode = '';
if(typeof this.get('selectionsMode') !== 'string') {
Ember.Logger.error('`selectionsMode` property expects a string');
}
switch(this.get('selectionsMode').toLowerCase()) {
case 'marker':
mode = 'MARKER'; break;
case 'circle':
mode = 'CIRCLE'; break;
case 'polygon':
mode = 'POLYGON'; break;
case 'polyline':
mode = 'POLYLINE'; break;
case 'rectangle':
mode = 'RECTANGLE'; break;
}
return (mode ? google.maps.drawing.OverlayType[mode] : null);
}),
// Stores reference to `overlaycomplete` event
_selectionsEventOverlayComplete: null,
/**
* [_initSelections runs once per selections instance instantiation]
* [Added via `_validateSelections`]
* [Observes ('isMapLoaded', 'selections')]
*/
_initSelections: function() {
const continueSetup = (
this.get('isMapLoaded') &&
this.get('selections') &&
this.get('googleMapsSupportsDrawingManager') &&
!this.get('_drawingManager')
);
if(!continueSetup) { return; }
// Create DrawingManager Instance and store
const drawingManager = new google.maps.drawing.DrawingManager();
this.set('_drawingManager', drawingManager);
// Watch for changes to selections configuration and inital sync
this.addObserver('_drawManagerOptions', this, '_syncDrawingMangagerOptions');
this._syncDrawingMangagerOptions();
// Add the drawing manager to the map
drawingManager.setMap(this.get('map').map);
let lastSelection;
// Bind selection events
const overlayListener = google.maps.event.addListener(drawingManager, 'overlaycomplete', (event) => {
// Prohibit simultanious selections
if(lastSelection && lastSelection.map) {
lastSelection.setMap(null);
}
lastSelection = event.overlay;
if (event.type === google.maps.drawing.OverlayType.MARKER) {
this.send('selectionsMarker', event.overlay);
}
else if (event.type === google.maps.drawing.OverlayType.CIRCLE) {
this.send('selectionsCircle', event.overlay);
}
else if(event.type === google.maps.drawing.OverlayType.RECTANGLE) {
this.send('selectionsRectangle', event.overlay);
}
else if(event.type === google.maps.drawing.OverlayType.POLYGON) {
this.send('selectionsPolygon', event.overlay);
}
else if(event.type === google.maps.drawing.OverlayType.POLYLINE) {
this.send('selectionsPolyline', event.overlay);
}
// Remove the last drawing from map
later(() => { event.overlay.setMap(null); }, this.get('selectionsDelay') || 400);
});
// create reference to event
this.set('_selectionsEventOverlayComplete', overlayListener);
// Add listener to sync user selection of map drawing controls
this.$().on('click', '.gmnoprint > div', Ember.run.bind(this, this._syncDrawingManagerModeControls));
// Remove observers added during `didInsertElement`
this.removeObserver('isMapLoaded', this, '_initSelections');
this.removeObserver('selections', this, '_initSelections');
},
/**
* [Return the configuration object for the drawingManager]
* @param {[Strings]} [Observes all relevant properties on `selections` config]
* @return {[Object]} [Drawing Manager Configuration Object]
*/
_drawManagerOptions: computed(
'selections',
'_gmapSelectionsMode',
'_gmapSelectionsModes',
'_gmapSelectionsPosition',
'selections.{visible,markerOptions,circleOptions,polygonOptions,polylineOptions,rectangleOptions}',
function() {
const isVisible = this.get('selections.visible');
const markerOptions = this.get('selections.markerOptions');
const circleOptions = this.get('selections.circleOptions');
const polygonOptions = this.get('selections.polygonOptions');
const polylineOptions = this.get('selections.polylineOptions');
const rectangleOptions = this.get('selections.rectangleOptions');
const options = {
drawingMode: this.get('_gmapSelectionsMode'),
drawingControl: (typeof isVisible === 'boolean' ? isVisible : true), // Shows or hides draw manager
drawingControlOptions: {
position: this.get('_gmapSelectionsPosition'),
drawingModes: this.get('_gmapSelectionsModes')
}
};
if(markerOptions) {
options.markerOptions = markerOptions;
}
if(circleOptions) {
options.circleOptions = circleOptions;
}
if(polygonOptions) {
options.polygonOptions = polygonOptions;
}
if(polylineOptions) {
options.polylineOptions = polylineOptions;
}
if(rectangleOptions) |
return options;
}
),
/**
* [_syncDrawingMangagerOptions finally sets the options on the drawManager instance]
* [Added via `_initSelections`]
* [Observes ('_drawManagerOptions')]
*/
_syncDrawingMangagerOptions: function() {
return this.get('_drawingManager').setOptions(this.get('_drawManagerOptions'));
},
/**
* [_syncDrawingManagerModeControls get active drawingMode and bind to parent, enforces string type if falsey]
*/
_syncDrawingManagerModeControls: function() {
const mode = this.get('_drawingManager').drawingMode || '';
this.set('selectionsMode', mode);
},
/**
* [googleMapsSupportsDrawingManager returns a boolean indicating if DrawingManager is supported]
* @return {[Boolean]}
*/
googleMapsSupportsDrawingManager: computed(function() {
return (
google.maps &&
google.maps.drawing &&
google.maps.drawing.DrawingManager
);
}),
/**
* [_validateSelections determines if selections can instantiate, if so adds init observers]
* @param {[String]} )[triggered on element insertion]
* @return {[Oberservers]} [if valid adds obersvers to init method]
*/
_validateSelections: on('didInsertElement', function() {
if(!this.get('selections')) { return false; }
if(!this.get('googleMapsSupportsDrawingManager')) {
throw new Error('g-map component requires the "drawing" library included in `config/environment.js`');
}
else {
// Enable selections setup
this.addObserver('isMapLoaded', this, '_initSelections');
this.addObserver('selections', this, '_initSelections');
}
}),
/**
* [_teardownSelections removes the draw manager from the map, clears up memory, and unbinds events]
* @param {[String]} [triggered on element destroy]
*/
_teardownSelections: on('willDestroyElement', function() {
const drawingManager = this.get('_drawingManager');
if(drawingManager) {
drawingManager.setMap(null);
this.set('drawingManager', null);
// Remove overlay complete listener
this.get('_selectionsEventOverlayComplete').remove();
this.set('_selectionsEventOverlayComplete', null);
// Remove select control sync listener
this.$().off('click', '.gmnoprint > div');
}
}),
actions: {
selectionsMarker: function(marker) {
this.sendAction('selectionsMarker', {
marker,
lat: marker.position.lat(),
lng: marker.position.lng()
});
},
selectionsCircle: function(circle) {
this.sendAction('selectionsCircle', {
circle,
radius: circle.getRadius(),
lat: circle.center.lat(),
lng: circle.center.lng()
});
},
selectionsRectangle: function(rectangle) {
const ne = rectangle.bounds.getNorthEast();
const sw = rectangle.bounds.getSouthWest();
this.sendAction('selectionsRectangle', {
rectangle,
bounds: [
{ lat: ne.lat(), lng: ne.lng(), location: 'northeast' }, // Northeast
{ lat: sw.lat(), lng: sw.lng(), location: 'southwest' } // Southwest
]
});
},
selectionsPolygon: function(polygon) {
let pathTarget = polygon.latLngs.getArray()[0];
if(typeof pathTarget.getArray === 'function') {
pathTarget = pathTarget.getArray();
}
this.sendAction('selectionsPolygon', {
polygon,
coords: pathTarget.map((c) => { return { lat: c.lat(), lng: c.lng() }; })
});
},
selectionsPolyline: function(polyline) {
let pathTarget = polyline.latLngs.getArray()[0];
if(typeof pathTarget.getArray === 'function') {
pathTarget = pathTarget.getArray();
}
this.sendAction('selectionsPolyline', {
polyline,
coords: pathTarget.map((c) => { return { lat: c.lat(), lng: c.lng() }; })
});
}
}
});
| {
options.rectangleOptions = rectangleOptions;
} | conditional_block |
selections.js | /* globals google: true */
import Ember from 'ember';
const { later } = Ember.run;
const { on, computed, isArray } = Ember;
export default Ember.Mixin.create({
// Stores reference to google DrawingManager instance
_drawingManager: null,
/**
* [selectionsDelay time it takes to remove last selection from the map]
* @type {Number}
*/
selectionsDelay: null,
// Default to all supported mode
selectionsModes: [
'marker',
'circle',
'polygon',
'polyline',
'rectangle'
],
/**
* [_gmapSelectionsModes]
* @param {String} [observes `selectionsModes` binding options]
* @return {[Array]} [Returns array of matched google OverlayType's]
*/
_gmapSelectionsModes: computed('selectionsModes.[]', function() {
const modes = [];
if(isArray(this.get('selectionsModes')) === false) {
Ember.Logger.error('`selectionsModes` property expects an array');
}
const selectionsModes = this.get('selectionsModes').map((dm) => dm.toLowerCase());
if(selectionsModes.indexOf('marker') > -1) {
modes.push(google.maps.drawing.OverlayType.MARKER);
}
if(selectionsModes.indexOf('circle') > -1) {
modes.push(google.maps.drawing.OverlayType.CIRCLE);
}
if(selectionsModes.indexOf('polygon') > -1) {
modes.push(google.maps.drawing.OverlayType.POLYGON);
}
if(selectionsModes.indexOf('polyline') > -1) {
modes.push(google.maps.drawing.OverlayType.POLYLINE);
}
if(selectionsModes.indexOf('rectangle') > -1) {
modes.push(google.maps.drawing.OverlayType.RECTANGLE);
}
return modes;
}),
// Default to controls on top
selectionsPosition: 'top',
/**
* [_gmapSelectionsPosition ]
* @param {String} [observes `selectionsPosition` binding]
* @return {[ControlPosition]} [Returns matching google ControlPosition]
*/
_gmapSelectionsPosition: computed('selectionsPosition', function() {
let pos = 'TOP_CENTER';
if(typeof this.get('selectionsPosition') !== 'string') {
Ember.Logger.error('`selectionsPosition` property expects a string');
}
switch(Ember.String.dasherize(this.get('selectionsPosition').replace('_', '-')).toLowerCase()) {
case 'top-left':
pos = 'TOP_LEFT'; break;
case 'top-right':
pos = 'TOP_RIGHT'; break;
case 'left-top':
pos = 'LEFT_TOP'; break;
case 'right-top':
pos = 'RIGHT_TOP'; break;
case 'left':
pos = 'LEFT_CENTER'; break;
case 'left-center':
pos = 'LEFT_CENTER'; break;
case 'right':
pos = 'RIGHT_CENTER'; break;
case 'right-center':
pos = 'RIGHT_CENTER'; break;
case 'left-bottom':
pos = 'LEFT_BOTTOM'; break;
case 'right-bottom':
pos = 'RIGHT_BOTTOM'; break;
case 'bottom':
pos = 'BOTTOM_CENTER'; break;
case 'bottom-center':
pos = 'BOTTOM_CENTER'; break;
case 'bottom-left':
pos = 'BOTTOM_LEFT'; break;
case 'bottom-right':
pos = 'BOTTOM_RIGHT'; break;
}
return google.maps.ControlPosition[pos];
}),
// Default to no active selection tool
selectionsMode: '',
/**
* [_gmapSelectionsMode]
* @param {String} [observes `selectionsMode` binding]
* @return {[OverlayType|null]} [Returns matching google OverlayType]
*/
_gmapSelectionsMode: computed('selectionsMode', function() {
let mode = '';
if(typeof this.get('selectionsMode') !== 'string') {
Ember.Logger.error('`selectionsMode` property expects a string');
}
switch(this.get('selectionsMode').toLowerCase()) {
case 'marker':
mode = 'MARKER'; break;
case 'circle':
mode = 'CIRCLE'; break;
case 'polygon':
mode = 'POLYGON'; break;
case 'polyline':
mode = 'POLYLINE'; break;
case 'rectangle':
mode = 'RECTANGLE'; break;
}
return (mode ? google.maps.drawing.OverlayType[mode] : null);
}),
// Stores reference to `overlaycomplete` event
_selectionsEventOverlayComplete: null,
/**
* [_initSelections runs once per selections instance instantiation]
* [Added via `_validateSelections`]
* [Observes ('isMapLoaded', 'selections')]
*/
_initSelections: function() {
const continueSetup = (
this.get('isMapLoaded') &&
this.get('selections') &&
this.get('googleMapsSupportsDrawingManager') &&
!this.get('_drawingManager')
);
if(!continueSetup) { return; }
// Create DrawingManager Instance and store
const drawingManager = new google.maps.drawing.DrawingManager();
this.set('_drawingManager', drawingManager);
// Watch for changes to selections configuration and inital sync
this.addObserver('_drawManagerOptions', this, '_syncDrawingMangagerOptions');
this._syncDrawingMangagerOptions();
// Add the drawing manager to the map
drawingManager.setMap(this.get('map').map);
let lastSelection;
// Bind selection events
const overlayListener = google.maps.event.addListener(drawingManager, 'overlaycomplete', (event) => {
// Prohibit simultanious selections
if(lastSelection && lastSelection.map) {
lastSelection.setMap(null);
}
lastSelection = event.overlay;
if (event.type === google.maps.drawing.OverlayType.MARKER) {
this.send('selectionsMarker', event.overlay);
}
else if (event.type === google.maps.drawing.OverlayType.CIRCLE) {
this.send('selectionsCircle', event.overlay);
}
else if(event.type === google.maps.drawing.OverlayType.RECTANGLE) {
this.send('selectionsRectangle', event.overlay);
}
else if(event.type === google.maps.drawing.OverlayType.POLYGON) {
this.send('selectionsPolygon', event.overlay);
}
else if(event.type === google.maps.drawing.OverlayType.POLYLINE) {
this.send('selectionsPolyline', event.overlay);
}
// Remove the last drawing from map
later(() => { event.overlay.setMap(null); }, this.get('selectionsDelay') || 400);
});
// create reference to event
this.set('_selectionsEventOverlayComplete', overlayListener);
// Add listener to sync user selection of map drawing controls
this.$().on('click', '.gmnoprint > div', Ember.run.bind(this, this._syncDrawingManagerModeControls));
// Remove observers added during `didInsertElement`
this.removeObserver('isMapLoaded', this, '_initSelections');
this.removeObserver('selections', this, '_initSelections');
},
/**
* [Return the configuration object for the drawingManager]
* @param {[Strings]} [Observes all relevant properties on `selections` config]
* @return {[Object]} [Drawing Manager Configuration Object]
*/
_drawManagerOptions: computed(
'selections',
'_gmapSelectionsMode',
'_gmapSelectionsModes',
'_gmapSelectionsPosition',
'selections.{visible,markerOptions,circleOptions,polygonOptions,polylineOptions,rectangleOptions}',
function() {
const isVisible = this.get('selections.visible');
const markerOptions = this.get('selections.markerOptions');
const circleOptions = this.get('selections.circleOptions');
const polygonOptions = this.get('selections.polygonOptions');
const polylineOptions = this.get('selections.polylineOptions');
const rectangleOptions = this.get('selections.rectangleOptions');
const options = {
drawingMode: this.get('_gmapSelectionsMode'),
drawingControl: (typeof isVisible === 'boolean' ? isVisible : true), // Shows or hides draw manager
drawingControlOptions: {
position: this.get('_gmapSelectionsPosition'),
drawingModes: this.get('_gmapSelectionsModes')
}
};
if(markerOptions) {
options.markerOptions = markerOptions;
}
if(circleOptions) {
options.circleOptions = circleOptions;
}
if(polygonOptions) {
options.polygonOptions = polygonOptions;
}
if(polylineOptions) {
options.polylineOptions = polylineOptions;
}
if(rectangleOptions) {
options.rectangleOptions = rectangleOptions;
}
return options;
}
),
/**
* [_syncDrawingMangagerOptions finally sets the options on the drawManager instance]
* [Added via `_initSelections`]
* [Observes ('_drawManagerOptions')]
*/
_syncDrawingMangagerOptions: function() {
return this.get('_drawingManager').setOptions(this.get('_drawManagerOptions'));
},
/**
* [_syncDrawingManagerModeControls get active drawingMode and bind to parent, enforces string type if falsey]
*/
_syncDrawingManagerModeControls: function() {
const mode = this.get('_drawingManager').drawingMode || '';
this.set('selectionsMode', mode);
},
/**
* [googleMapsSupportsDrawingManager returns a boolean indicating if DrawingManager is supported]
* @return {[Boolean]}
*/
googleMapsSupportsDrawingManager: computed(function() {
return (
google.maps &&
google.maps.drawing &&
google.maps.drawing.DrawingManager
);
}),
/**
* [_validateSelections determines if selections can instantiate, if so adds init observers]
* @param {[String]} )[triggered on element insertion]
* @return {[Oberservers]} [if valid adds obersvers to init method]
*/
_validateSelections: on('didInsertElement', function() {
if(!this.get('selections')) { return false; }
if(!this.get('googleMapsSupportsDrawingManager')) {
throw new Error('g-map component requires the "drawing" library included in `config/environment.js`');
}
else {
// Enable selections setup
this.addObserver('isMapLoaded', this, '_initSelections');
this.addObserver('selections', this, '_initSelections');
}
}),
/**
* [_teardownSelections removes the draw manager from the map, clears up memory, and unbinds events]
* @param {[String]} [triggered on element destroy]
*/
_teardownSelections: on('willDestroyElement', function() {
const drawingManager = this.get('_drawingManager');
if(drawingManager) {
drawingManager.setMap(null);
this.set('drawingManager', null);
// Remove overlay complete listener
this.get('_selectionsEventOverlayComplete').remove();
this.set('_selectionsEventOverlayComplete', null);
// Remove select control sync listener
this.$().off('click', '.gmnoprint > div');
}
}),
actions: {
selectionsMarker: function(marker) {
this.sendAction('selectionsMarker', {
marker,
lat: marker.position.lat(),
lng: marker.position.lng()
});
}, | this.sendAction('selectionsCircle', {
circle,
radius: circle.getRadius(),
lat: circle.center.lat(),
lng: circle.center.lng()
});
},
selectionsRectangle: function(rectangle) {
const ne = rectangle.bounds.getNorthEast();
const sw = rectangle.bounds.getSouthWest();
this.sendAction('selectionsRectangle', {
rectangle,
bounds: [
{ lat: ne.lat(), lng: ne.lng(), location: 'northeast' }, // Northeast
{ lat: sw.lat(), lng: sw.lng(), location: 'southwest' } // Southwest
]
});
},
selectionsPolygon: function(polygon) {
let pathTarget = polygon.latLngs.getArray()[0];
if(typeof pathTarget.getArray === 'function') {
pathTarget = pathTarget.getArray();
}
this.sendAction('selectionsPolygon', {
polygon,
coords: pathTarget.map((c) => { return { lat: c.lat(), lng: c.lng() }; })
});
},
selectionsPolyline: function(polyline) {
let pathTarget = polyline.latLngs.getArray()[0];
if(typeof pathTarget.getArray === 'function') {
pathTarget = pathTarget.getArray();
}
this.sendAction('selectionsPolyline', {
polyline,
coords: pathTarget.map((c) => { return { lat: c.lat(), lng: c.lng() }; })
});
}
}
}); |
selectionsCircle: function(circle) { | random_line_split |
test_templates.py | """Test template specific functionality.
Make sure tables expose their functionality to templates right. This
generally about testing "out"-functionality of the tables, whether
via templates or otherwise. Whether a test belongs here or, say, in
``test_basic``, is not always a clear-cut decision.
"""
from django.template import Template, Context, add_to_builtins
from django.http import HttpRequest
import django_tables as tables
def test_order_by():
class BookTable(tables.Table):
id = tables.Column()
name = tables.Column()
books = BookTable([
{'id': 1, 'name': 'Foo: Bar'},
])
# cast to a string we get a value ready to be passed to the querystring
books.order_by = ('name',)
assert str(books.order_by) == 'name'
books.order_by = ('name', '-id')
assert str(books.order_by) == 'name,-id'
def test_columns_and_rows():
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn(sortable=False)
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'cc': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'cc': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'cc': '31'},
{'name': 'Austria', 'cc': 43, 'currency': 'Euro (€)', 'population': 8}])
assert len(list(countries.columns)) == 4
assert len(list(countries.rows)) == len(list(countries)) == 4
# column name override, hidden columns
assert [c.name for c in countries.columns] == ['name', 'capital', 'population', 'cc']
# verbose_name, and fallback to field name
assert [unicode(c) for c in countries.columns] == ['Name', 'Capital', 'Population Size', 'Phone Ext.']
# data yielded by each row matches the defined columns
for row in countries.rows:
assert len(list(row)) == len(list(countries.columns))
# we can access each column and row by name...
assert countries.columns['population'].column.verbose_name == "Population Size"
assert countries.columns['cc'].column.verbose_name == "Phone Ext."
# ...even invisible ones
assert countries.columns['tld'].column.verbose_name == "Domain"
# ...and even inaccessible ones (but accessible to the coder)
assert countries.columns['currency'].column == countries.base_columns['currency']
# this also works for rows
for row in countries:
row['tld'], row['cc'], row['population']
# certain data is available on columns
assert countries.columns['currency'].sortable == True
assert countries.columns['capital'].sortable == False
assert countries.columns['name'].visible == True
assert countries.columns['tld'].visible == False
def test_render():
"""For good measure, render some actual templates."""
class CountryT | Table):
name = tables.TextColumn()
capital = tables.TextColumn()
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'calling_code': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'calling_code': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'calling_code': '31'},
{'name': 'Austria', 'calling_code': 43, 'currency': 'Euro (€)', 'population': 8}])
assert Template("{% for column in countries.columns %}{{ column }}/{{ column.name }} {% endfor %}").\
render(Context({'countries': countries})) == \
"Name/name Capital/capital Population Size/population Phone Ext./cc "
assert Template("{% for row in countries %}{% for value in row %}{{ value }} {% endfor %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany Berlin 83 49 France None 64 33 Netherlands Amsterdam None 31 Austria None 8 43 "
print Template("{% for row in countries %}{% if countries.columns.name.visible %}{{ row.name }} {% endif %}{% if countries.columns.tld.visible %}{{ row.tld }} {% endif %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany France Netherlands Austria"
def test_templatetags():
add_to_builtins('django_tables.app.templatetags.tables')
# [bug] set url param tag handles an order_by tuple with multiple columns
class MyTable(tables.Table):
f1 = tables.Column()
f2 = tables.Column()
t = Template('{% set_url_param x=table.order_by %}')
table = MyTable([], order_by=('f1', 'f2'))
assert t.render({'request': HttpRequest(), 'table': table}) == '?x=f1%2Cf2'
| able(tables. | identifier_name |
test_templates.py | """Test template specific functionality.
Make sure tables expose their functionality to templates right. This
generally about testing "out"-functionality of the tables, whether
via templates or otherwise. Whether a test belongs here or, say, in
``test_basic``, is not always a clear-cut decision.
"""
from django.template import Template, Context, add_to_builtins
from django.http import HttpRequest
import django_tables as tables
def test_order_by():
class BookTable(tables.Table):
id = tables.Column()
name = tables.Column()
books = BookTable([
{'id': 1, 'name': 'Foo: Bar'},
])
# cast to a string we get a value ready to be passed to the querystring
books.order_by = ('name',)
assert str(books.order_by) == 'name'
books.order_by = ('name', '-id')
assert str(books.order_by) == 'name,-id'
def test_columns_and_rows():
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn(sortable=False)
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'cc': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'cc': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'cc': '31'},
{'name': 'Austria', 'cc': 43, 'currency': 'Euro (€)', 'population': 8}])
assert len(list(countries.columns)) == 4
assert len(list(countries.rows)) == len(list(countries)) == 4
# column name override, hidden columns
assert [c.name for c in countries.columns] == ['name', 'capital', 'population', 'cc']
# verbose_name, and fallback to field name
assert [unicode(c) for c in countries.columns] == ['Name', 'Capital', 'Population Size', 'Phone Ext.']
# data yielded by each row matches the defined columns
for row in countries.rows:
assert len(list(row)) == len(list(countries.columns))
# we can access each column and row by name...
assert countries.columns['population'].column.verbose_name == "Population Size"
assert countries.columns['cc'].column.verbose_name == "Phone Ext."
# ...even invisible ones
assert countries.columns['tld'].column.verbose_name == "Domain"
# ...and even inaccessible ones (but accessible to the coder)
assert countries.columns['currency'].column == countries.base_columns['currency']
# this also works for rows
for row in countries:
row['tld | certain data is available on columns
assert countries.columns['currency'].sortable == True
assert countries.columns['capital'].sortable == False
assert countries.columns['name'].visible == True
assert countries.columns['tld'].visible == False
def test_render():
"""For good measure, render some actual templates."""
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn()
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'calling_code': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'calling_code': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'calling_code': '31'},
{'name': 'Austria', 'calling_code': 43, 'currency': 'Euro (€)', 'population': 8}])
assert Template("{% for column in countries.columns %}{{ column }}/{{ column.name }} {% endfor %}").\
render(Context({'countries': countries})) == \
"Name/name Capital/capital Population Size/population Phone Ext./cc "
assert Template("{% for row in countries %}{% for value in row %}{{ value }} {% endfor %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany Berlin 83 49 France None 64 33 Netherlands Amsterdam None 31 Austria None 8 43 "
print Template("{% for row in countries %}{% if countries.columns.name.visible %}{{ row.name }} {% endif %}{% if countries.columns.tld.visible %}{{ row.tld }} {% endif %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany France Netherlands Austria"
def test_templatetags():
add_to_builtins('django_tables.app.templatetags.tables')
# [bug] set url param tag handles an order_by tuple with multiple columns
class MyTable(tables.Table):
f1 = tables.Column()
f2 = tables.Column()
t = Template('{% set_url_param x=table.order_by %}')
table = MyTable([], order_by=('f1', 'f2'))
assert t.render({'request': HttpRequest(), 'table': table}) == '?x=f1%2Cf2'
| '], row['cc'], row['population']
# | conditional_block |
test_templates.py | """Test template specific functionality.
Make sure tables expose their functionality to templates right. This
generally about testing "out"-functionality of the tables, whether
via templates or otherwise. Whether a test belongs here or, say, in
``test_basic``, is not always a clear-cut decision.
"""
from django.template import Template, Context, add_to_builtins
from django.http import HttpRequest
import django_tables as tables
def test_order_by():
class BookTable(tables.Table):
id = tables.Column()
name = tables.Column()
books = BookTable([
{'id': 1, 'name': 'Foo: Bar'},
])
# cast to a string we get a value ready to be passed to the querystring
books.order_by = ('name',)
assert str(books.order_by) == 'name'
books.order_by = ('name', '-id')
assert str(books.order_by) == 'name,-id'
def test_columns_and_rows():
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn(sortable=False)
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'cc': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'cc': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'cc': '31'},
{'name': 'Austria', 'cc': 43, 'currency': 'Euro (€)', 'population': 8}])
assert len(list(countries.columns)) == 4
assert len(list(countries.rows)) == len(list(countries)) == 4
# column name override, hidden columns
assert [c.name for c in countries.columns] == ['name', 'capital', 'population', 'cc']
# verbose_name, and fallback to field name
assert [unicode(c) for c in countries.columns] == ['Name', 'Capital', 'Population Size', 'Phone Ext.']
# data yielded by each row matches the defined columns
for row in countries.rows:
assert len(list(row)) == len(list(countries.columns))
# we can access each column and row by name...
assert countries.columns['population'].column.verbose_name == "Population Size"
assert countries.columns['cc'].column.verbose_name == "Phone Ext."
# ...even invisible ones
assert countries.columns['tld'].column.verbose_name == "Domain"
# ...and even inaccessible ones (but accessible to the coder)
assert countries.columns['currency'].column == countries.base_columns['currency']
# this also works for rows
for row in countries:
row['tld'], row['cc'], row['population']
# certain data is available on columns
assert countries.columns['currency'].sortable == True
assert countries.columns['capital'].sortable == False
assert countries.columns['name'].visible == True
assert countries.columns['tld'].visible == False
def test_render():
"""For good measure, render some actual templates."""
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn()
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'calling_code': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'calling_code': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'calling_code': '31'},
{'name': 'Austria', 'calling_code': 43, 'currency': 'Euro (€)', 'population': 8}])
assert Template("{% for column in countries.columns %}{{ column }}/{{ column.name }} {% endfor %}").\
render(Context({'countries': countries})) == \
"Name/name Capital/capital Population Size/population Phone Ext./cc " | "Germany Berlin 83 49 France None 64 33 Netherlands Amsterdam None 31 Austria None 8 43 "
print Template("{% for row in countries %}{% if countries.columns.name.visible %}{{ row.name }} {% endif %}{% if countries.columns.tld.visible %}{{ row.tld }} {% endif %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany France Netherlands Austria"
def test_templatetags():
add_to_builtins('django_tables.app.templatetags.tables')
# [bug] set url param tag handles an order_by tuple with multiple columns
class MyTable(tables.Table):
f1 = tables.Column()
f2 = tables.Column()
t = Template('{% set_url_param x=table.order_by %}')
table = MyTable([], order_by=('f1', 'f2'))
assert t.render({'request': HttpRequest(), 'table': table}) == '?x=f1%2Cf2' |
assert Template("{% for row in countries %}{% for value in row %}{{ value }} {% endfor %}{% endfor %}").\
render(Context({'countries': countries})) == \ | random_line_split |
test_templates.py | """Test template specific functionality.
Make sure tables expose their functionality to templates right. This
generally about testing "out"-functionality of the tables, whether
via templates or otherwise. Whether a test belongs here or, say, in
``test_basic``, is not always a clear-cut decision.
"""
from django.template import Template, Context, add_to_builtins
from django.http import HttpRequest
import django_tables as tables
def test_order_by():
class BookTable(tables.Table):
id | books = BookTable([
{'id': 1, 'name': 'Foo: Bar'},
])
# cast to a string we get a value ready to be passed to the querystring
books.order_by = ('name',)
assert str(books.order_by) == 'name'
books.order_by = ('name', '-id')
assert str(books.order_by) == 'name,-id'
def test_columns_and_rows():
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn(sortable=False)
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'cc': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'cc': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'cc': '31'},
{'name': 'Austria', 'cc': 43, 'currency': 'Euro (€)', 'population': 8}])
assert len(list(countries.columns)) == 4
assert len(list(countries.rows)) == len(list(countries)) == 4
# column name override, hidden columns
assert [c.name for c in countries.columns] == ['name', 'capital', 'population', 'cc']
# verbose_name, and fallback to field name
assert [unicode(c) for c in countries.columns] == ['Name', 'Capital', 'Population Size', 'Phone Ext.']
# data yielded by each row matches the defined columns
for row in countries.rows:
assert len(list(row)) == len(list(countries.columns))
# we can access each column and row by name...
assert countries.columns['population'].column.verbose_name == "Population Size"
assert countries.columns['cc'].column.verbose_name == "Phone Ext."
# ...even invisible ones
assert countries.columns['tld'].column.verbose_name == "Domain"
# ...and even inaccessible ones (but accessible to the coder)
assert countries.columns['currency'].column == countries.base_columns['currency']
# this also works for rows
for row in countries:
row['tld'], row['cc'], row['population']
# certain data is available on columns
assert countries.columns['currency'].sortable == True
assert countries.columns['capital'].sortable == False
assert countries.columns['name'].visible == True
assert countries.columns['tld'].visible == False
def test_render():
"""For good measure, render some actual templates."""
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn()
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'calling_code': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'calling_code': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'calling_code': '31'},
{'name': 'Austria', 'calling_code': 43, 'currency': 'Euro (€)', 'population': 8}])
assert Template("{% for column in countries.columns %}{{ column }}/{{ column.name }} {% endfor %}").\
render(Context({'countries': countries})) == \
"Name/name Capital/capital Population Size/population Phone Ext./cc "
assert Template("{% for row in countries %}{% for value in row %}{{ value }} {% endfor %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany Berlin 83 49 France None 64 33 Netherlands Amsterdam None 31 Austria None 8 43 "
print Template("{% for row in countries %}{% if countries.columns.name.visible %}{{ row.name }} {% endif %}{% if countries.columns.tld.visible %}{{ row.tld }} {% endif %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany France Netherlands Austria"
def test_templatetags():
add_to_builtins('django_tables.app.templatetags.tables')
# [bug] set url param tag handles an order_by tuple with multiple columns
class MyTable(tables.Table):
f1 = tables.Column()
f2 = tables.Column()
t = Template('{% set_url_param x=table.order_by %}')
table = MyTable([], order_by=('f1', 'f2'))
assert t.render({'request': HttpRequest(), 'table': table}) == '?x=f1%2Cf2'
| = tables.Column()
name = tables.Column()
| identifier_body |
serializer.test.tsx | import * as React from 'react'
import { shallow, mount } from 'enzyme'
import { Editor, EditorState, convertFromRaw } from 'draft-js'
import { Serlizer } from './serializer'
import { getContentState } from './testUtils'
describe('Editor Serilizer', () => {
it('serialize ContentState should get RawContentState', () => {
const rawState: any = getContentState('hello wolrd')
const editor = mount(
<Editor editorState={
EditorState.createWithContent(convertFromRaw(rawState))
}
onChange={ () => { } }
>
</Editor>
)
const contentState = editor.prop('editorState').getCurrentContent()
const s = Serlizer.serialize(contentState)
expect(JSON.stringify(rawState)).toEqual(JSON.stringify(s))
})
it('<Editor/> should get right textContent', () => {
const text = '你好啊 今天开心吗,BUG又少了吗'
const s: any = getContentState(text)
const c = Serlizer.deserialize(s)
const editor = mount(
<Editor | expect(editor.text()).toEqual(text)
})
}) | onChange={ () => { } }
editorState={ EditorState.createWithContent(c) }>
</Editor >
) | random_line_split |
protoc.py | # Copyright (c) 2009 Scott Stafford
# Copyright 2014 The Ostrich / by Itamar O
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
protoc.py: Protoc Builder for SCons
This Builder invokes protoc to generate C++ and Python from a .proto file.
NOTE: Java is not currently supported.
Derived from original work by Scott Stafford
(http://www.scons.org/wiki/ProtocBuilder)
"""
__author__ = "Itamar Ostricher"
import os
import re
import SCons
_PROTOCS = 'protoc'
_PROTOSUFFIX = '.proto'
_PROTOC_SCANNER_RE = re.compile(r'^import\s+\"(.+\.proto)\"\;$', re.M)
def protoc_emitter(target, source, env):
"""Return list of targets generated by Protoc builder for source."""
for src in source:
proto = os.path.splitext(str(src))[0]
if env['PROTOCPPOUT']:
|
if env['PROTOPYOUT']:
target.append('%s_pb2.py' % (proto))
return target, source
def protoc_scanner(node, env, _):
"""Return list of file nodes that `node` imports"""
contents = node.get_text_contents()
# If build location different from sources location,
# get the destination base dir as the base for imports.
nodepath = str(node.path)
srcnodepath = str(node.srcnode())
src_pos = nodepath.find(srcnodepath)
base_path = src_pos and nodepath[:src_pos-1] or ''
imports = [os.path.join(base_path, imp)
for imp in _PROTOC_SCANNER_RE.findall(contents)]
return env.File(imports)
def generate(env):
"""Add Builders, Scanners and construction variables
for protoc to the build Environment."""
try:
bldr = env['BUILDERS']['Protoc']
except KeyError:
action = SCons.Action.Action('$PROTOCOM', '$PROTOCOMSTR')
bldr = SCons.Builder.Builder(action=action,
emitter=protoc_emitter,
src_suffix='$PROTOCSRCSUFFIX')
env['BUILDERS']['Protoc'] = bldr
# pylint: disable=bad-whitespace
env['PROTOC'] = env.Detect(_PROTOCS) or 'protoc'
env['PROTOCFLAGS'] = SCons.Util.CLVar('')
env['PROTOCSRCSUFFIX'] = _PROTOSUFFIX
# Default proto search path is same dir
env['PROTOPATH'] = ['.']
# Default CPP output in same dir
env['PROTOCPPOUT'] = '.'
# No default Python output
env['PROTOPYOUT'] = ''
proto_cmd = ['$PROTOC']
proto_cmd.append('${["--proto_path=%s"%(x) for x in PROTOPATH]}')
proto_cmd.append('$PROTOCFLAGS')
proto_cmd.append('${PROTOCPPOUT and "--cpp_out=%s"%(PROTOCPPOUT) or ""}')
proto_cmd.append('${PROTOPYOUT and "--python_out=%s"%(PROTOPYOUT) or ""}')
proto_cmd.append('${SOURCES}')
env['PROTOCOM'] = ' '.join(proto_cmd)
# Add the proto scanner (if it wasn't added already)
env.AppendUnique(SCANNERS=SCons.Scanner.Scanner(function=protoc_scanner,
skeys=[_PROTOSUFFIX]))
def exists(env):
"""Return True if `protoc` tool exists in the system."""
return env.Detect(_PROTOCS)
| target.append('%s.pb.cc' % (proto))
target.append('%s.pb.h' % (proto)) | conditional_block |
protoc.py | # Copyright (c) 2009 Scott Stafford
# Copyright 2014 The Ostrich / by Itamar O
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
protoc.py: Protoc Builder for SCons
This Builder invokes protoc to generate C++ and Python from a .proto file.
NOTE: Java is not currently supported.
Derived from original work by Scott Stafford
(http://www.scons.org/wiki/ProtocBuilder)
"""
__author__ = "Itamar Ostricher"
import os
import re
import SCons
_PROTOCS = 'protoc'
_PROTOSUFFIX = '.proto'
_PROTOC_SCANNER_RE = re.compile(r'^import\s+\"(.+\.proto)\"\;$', re.M)
def protoc_emitter(target, source, env):
|
def protoc_scanner(node, env, _):
"""Return list of file nodes that `node` imports"""
contents = node.get_text_contents()
# If build location different from sources location,
# get the destination base dir as the base for imports.
nodepath = str(node.path)
srcnodepath = str(node.srcnode())
src_pos = nodepath.find(srcnodepath)
base_path = src_pos and nodepath[:src_pos-1] or ''
imports = [os.path.join(base_path, imp)
for imp in _PROTOC_SCANNER_RE.findall(contents)]
return env.File(imports)
def generate(env):
"""Add Builders, Scanners and construction variables
for protoc to the build Environment."""
try:
bldr = env['BUILDERS']['Protoc']
except KeyError:
action = SCons.Action.Action('$PROTOCOM', '$PROTOCOMSTR')
bldr = SCons.Builder.Builder(action=action,
emitter=protoc_emitter,
src_suffix='$PROTOCSRCSUFFIX')
env['BUILDERS']['Protoc'] = bldr
# pylint: disable=bad-whitespace
env['PROTOC'] = env.Detect(_PROTOCS) or 'protoc'
env['PROTOCFLAGS'] = SCons.Util.CLVar('')
env['PROTOCSRCSUFFIX'] = _PROTOSUFFIX
# Default proto search path is same dir
env['PROTOPATH'] = ['.']
# Default CPP output in same dir
env['PROTOCPPOUT'] = '.'
# No default Python output
env['PROTOPYOUT'] = ''
proto_cmd = ['$PROTOC']
proto_cmd.append('${["--proto_path=%s"%(x) for x in PROTOPATH]}')
proto_cmd.append('$PROTOCFLAGS')
proto_cmd.append('${PROTOCPPOUT and "--cpp_out=%s"%(PROTOCPPOUT) or ""}')
proto_cmd.append('${PROTOPYOUT and "--python_out=%s"%(PROTOPYOUT) or ""}')
proto_cmd.append('${SOURCES}')
env['PROTOCOM'] = ' '.join(proto_cmd)
# Add the proto scanner (if it wasn't added already)
env.AppendUnique(SCANNERS=SCons.Scanner.Scanner(function=protoc_scanner,
skeys=[_PROTOSUFFIX]))
def exists(env):
"""Return True if `protoc` tool exists in the system."""
return env.Detect(_PROTOCS)
| """Return list of targets generated by Protoc builder for source."""
for src in source:
proto = os.path.splitext(str(src))[0]
if env['PROTOCPPOUT']:
target.append('%s.pb.cc' % (proto))
target.append('%s.pb.h' % (proto))
if env['PROTOPYOUT']:
target.append('%s_pb2.py' % (proto))
return target, source | identifier_body |
protoc.py | # Copyright (c) 2009 Scott Stafford
# Copyright 2014 The Ostrich / by Itamar O
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
protoc.py: Protoc Builder for SCons
This Builder invokes protoc to generate C++ and Python from a .proto file.
NOTE: Java is not currently supported.
Derived from original work by Scott Stafford
(http://www.scons.org/wiki/ProtocBuilder)
"""
__author__ = "Itamar Ostricher"
import os
import re
import SCons
_PROTOCS = 'protoc'
_PROTOSUFFIX = '.proto'
_PROTOC_SCANNER_RE = re.compile(r'^import\s+\"(.+\.proto)\"\;$', re.M)
def protoc_emitter(target, source, env):
"""Return list of targets generated by Protoc builder for source."""
for src in source:
proto = os.path.splitext(str(src))[0]
if env['PROTOCPPOUT']:
target.append('%s.pb.cc' % (proto))
target.append('%s.pb.h' % (proto))
if env['PROTOPYOUT']:
target.append('%s_pb2.py' % (proto))
return target, source
def protoc_scanner(node, env, _):
"""Return list of file nodes that `node` imports"""
contents = node.get_text_contents()
# If build location different from sources location,
# get the destination base dir as the base for imports.
nodepath = str(node.path)
srcnodepath = str(node.srcnode())
src_pos = nodepath.find(srcnodepath)
base_path = src_pos and nodepath[:src_pos-1] or ''
imports = [os.path.join(base_path, imp)
for imp in _PROTOC_SCANNER_RE.findall(contents)]
return env.File(imports)
def generate(env):
"""Add Builders, Scanners and construction variables
for protoc to the build Environment."""
try:
bldr = env['BUILDERS']['Protoc']
except KeyError:
action = SCons.Action.Action('$PROTOCOM', '$PROTOCOMSTR')
bldr = SCons.Builder.Builder(action=action,
emitter=protoc_emitter,
src_suffix='$PROTOCSRCSUFFIX')
env['BUILDERS']['Protoc'] = bldr
# pylint: disable=bad-whitespace
env['PROTOC'] = env.Detect(_PROTOCS) or 'protoc' | env['PROTOPATH'] = ['.']
# Default CPP output in same dir
env['PROTOCPPOUT'] = '.'
# No default Python output
env['PROTOPYOUT'] = ''
proto_cmd = ['$PROTOC']
proto_cmd.append('${["--proto_path=%s"%(x) for x in PROTOPATH]}')
proto_cmd.append('$PROTOCFLAGS')
proto_cmd.append('${PROTOCPPOUT and "--cpp_out=%s"%(PROTOCPPOUT) or ""}')
proto_cmd.append('${PROTOPYOUT and "--python_out=%s"%(PROTOPYOUT) or ""}')
proto_cmd.append('${SOURCES}')
env['PROTOCOM'] = ' '.join(proto_cmd)
# Add the proto scanner (if it wasn't added already)
env.AppendUnique(SCANNERS=SCons.Scanner.Scanner(function=protoc_scanner,
skeys=[_PROTOSUFFIX]))
def exists(env):
"""Return True if `protoc` tool exists in the system."""
return env.Detect(_PROTOCS) | env['PROTOCFLAGS'] = SCons.Util.CLVar('')
env['PROTOCSRCSUFFIX'] = _PROTOSUFFIX
# Default proto search path is same dir | random_line_split |
protoc.py | # Copyright (c) 2009 Scott Stafford
# Copyright 2014 The Ostrich / by Itamar O
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
protoc.py: Protoc Builder for SCons
This Builder invokes protoc to generate C++ and Python from a .proto file.
NOTE: Java is not currently supported.
Derived from original work by Scott Stafford
(http://www.scons.org/wiki/ProtocBuilder)
"""
__author__ = "Itamar Ostricher"
import os
import re
import SCons
_PROTOCS = 'protoc'
_PROTOSUFFIX = '.proto'
_PROTOC_SCANNER_RE = re.compile(r'^import\s+\"(.+\.proto)\"\;$', re.M)
def protoc_emitter(target, source, env):
"""Return list of targets generated by Protoc builder for source."""
for src in source:
proto = os.path.splitext(str(src))[0]
if env['PROTOCPPOUT']:
target.append('%s.pb.cc' % (proto))
target.append('%s.pb.h' % (proto))
if env['PROTOPYOUT']:
target.append('%s_pb2.py' % (proto))
return target, source
def | (node, env, _):
"""Return list of file nodes that `node` imports"""
contents = node.get_text_contents()
# If build location different from sources location,
# get the destination base dir as the base for imports.
nodepath = str(node.path)
srcnodepath = str(node.srcnode())
src_pos = nodepath.find(srcnodepath)
base_path = src_pos and nodepath[:src_pos-1] or ''
imports = [os.path.join(base_path, imp)
for imp in _PROTOC_SCANNER_RE.findall(contents)]
return env.File(imports)
def generate(env):
"""Add Builders, Scanners and construction variables
for protoc to the build Environment."""
try:
bldr = env['BUILDERS']['Protoc']
except KeyError:
action = SCons.Action.Action('$PROTOCOM', '$PROTOCOMSTR')
bldr = SCons.Builder.Builder(action=action,
emitter=protoc_emitter,
src_suffix='$PROTOCSRCSUFFIX')
env['BUILDERS']['Protoc'] = bldr
# pylint: disable=bad-whitespace
env['PROTOC'] = env.Detect(_PROTOCS) or 'protoc'
env['PROTOCFLAGS'] = SCons.Util.CLVar('')
env['PROTOCSRCSUFFIX'] = _PROTOSUFFIX
# Default proto search path is same dir
env['PROTOPATH'] = ['.']
# Default CPP output in same dir
env['PROTOCPPOUT'] = '.'
# No default Python output
env['PROTOPYOUT'] = ''
proto_cmd = ['$PROTOC']
proto_cmd.append('${["--proto_path=%s"%(x) for x in PROTOPATH]}')
proto_cmd.append('$PROTOCFLAGS')
proto_cmd.append('${PROTOCPPOUT and "--cpp_out=%s"%(PROTOCPPOUT) or ""}')
proto_cmd.append('${PROTOPYOUT and "--python_out=%s"%(PROTOPYOUT) or ""}')
proto_cmd.append('${SOURCES}')
env['PROTOCOM'] = ' '.join(proto_cmd)
# Add the proto scanner (if it wasn't added already)
env.AppendUnique(SCANNERS=SCons.Scanner.Scanner(function=protoc_scanner,
skeys=[_PROTOSUFFIX]))
def exists(env):
"""Return True if `protoc` tool exists in the system."""
return env.Detect(_PROTOCS)
| protoc_scanner | identifier_name |
pinger.py | import asyncio
import functools
import logging
from aiohttp import request
from .packet import ControlPacket
PING_TIMEOUT = 10
PING_INTERVAL = 5
class Pinger:
"""
Pinger to send ping packets to an endpoint and inform if the timeout has occurred
"""
def __init__(self, handler, interval, timeout, loop=None, max_failures=5):
"""
Aysncio based pinger
:param handler: Pinger uses it to send a ping and inform when timeout occurs.
Must implement send_ping() and on_timeout() methods
:param int interval: time interval between ping after a pong
:param loop: Optional event loop
"""
self._handler = handler
self._interval = interval
self._timeout = timeout
self._loop = loop or asyncio.get_event_loop()
self._timer = None
self._failures = 0
self._max_failures = max_failures
self.logger = logging.getLogger()
@asyncio.coroutine
def send_ping(self, payload=None):
"""
Sends the ping after the interval specified when initializing
"""
yield from asyncio.sleep(self._interval)
self._handler.send_ping(payload=payload)
self._start_timer(payload=payload)
def pong_received(self, payload=None):
"""
Called when a pong is received. So the timer is cancelled
"""
if self._timer is not None:
self._timer.cancel()
self._failures = 0
asyncio.async(self.send_ping(payload=payload))
def _start_timer(self, payload=None):
self._timer = self._loop.call_later(self._timeout, functools.partial(self._on_timeout, payload=payload))
def stop(self):
if self._timer is not None:
self._timer.cancel()
def _on_timeout(self, payload=None):
if self._failures < self._max_failures:
self._failures += 1
asyncio.ensure_future(self.send_ping(payload=payload))
else:
self._handler.on_timeout()
class TCPPinger:
logger = logging.getLogger(__name__)
def __init__(self, host, port, node_id, protocol, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._protocol = protocol
self._handler = handler
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
self._protocol.send(ControlPacket.ping(self._node_id, payload=payload))
def on_timeout(self):
self.logger.debug('%s timed out', self._node_id)
# Dummy packet to cleanly close transport
self._protocol._transport.write(
'{"closed":"true", "type":"closed", "service":"none", "version":"none"}'.encode())
self._protocol.close()
self._handler.on_timeout(self._host, self._port, self._node_id)
def stop(self):
self._pinger.stop()
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload)
class HTTPPinger:
def __init__(self, host, port, node_id, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._handler = handler
self._url = 'http://{}:{}/ping'.format(host, port)
self.logger = logging.getLogger(__name__)
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
asyncio.ensure_future(self.ping_coroutine(payload=payload))
def | (self, payload=None):
try:
res = yield from request('get', self._url)
if res.status == 200:
self.pong_received(payload=payload)
res.close()
except Exception:
self.logger.exception('Error while ping')
def stop(self):
self._pinger.stop()
def on_timeout(self):
self.logger.warn('%s timed out', self._node_id)
self._handler.on_timeout(self._host, self._port, self._node_id)
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload)
| ping_coroutine | identifier_name |
pinger.py | import asyncio
import functools
import logging
from aiohttp import request
from .packet import ControlPacket
PING_TIMEOUT = 10
PING_INTERVAL = 5
class Pinger:
"""
Pinger to send ping packets to an endpoint and inform if the timeout has occurred
"""
def __init__(self, handler, interval, timeout, loop=None, max_failures=5):
"""
Aysncio based pinger
:param handler: Pinger uses it to send a ping and inform when timeout occurs.
Must implement send_ping() and on_timeout() methods
:param int interval: time interval between ping after a pong
:param loop: Optional event loop
"""
self._handler = handler
self._interval = interval
self._timeout = timeout
self._loop = loop or asyncio.get_event_loop()
self._timer = None
self._failures = 0
self._max_failures = max_failures
self.logger = logging.getLogger()
@asyncio.coroutine
def send_ping(self, payload=None):
"""
Sends the ping after the interval specified when initializing
"""
yield from asyncio.sleep(self._interval)
self._handler.send_ping(payload=payload)
self._start_timer(payload=payload)
def pong_received(self, payload=None):
"""
Called when a pong is received. So the timer is cancelled
"""
if self._timer is not None:
self._timer.cancel()
self._failures = 0
asyncio.async(self.send_ping(payload=payload))
def _start_timer(self, payload=None):
self._timer = self._loop.call_later(self._timeout, functools.partial(self._on_timeout, payload=payload))
def stop(self):
if self._timer is not None:
self._timer.cancel()
def _on_timeout(self, payload=None):
if self._failures < self._max_failures:
self._failures += 1
asyncio.ensure_future(self.send_ping(payload=payload))
else:
self._handler.on_timeout()
class TCPPinger:
|
class HTTPPinger:
def __init__(self, host, port, node_id, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._handler = handler
self._url = 'http://{}:{}/ping'.format(host, port)
self.logger = logging.getLogger(__name__)
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
asyncio.ensure_future(self.ping_coroutine(payload=payload))
def ping_coroutine(self, payload=None):
try:
res = yield from request('get', self._url)
if res.status == 200:
self.pong_received(payload=payload)
res.close()
except Exception:
self.logger.exception('Error while ping')
def stop(self):
self._pinger.stop()
def on_timeout(self):
self.logger.warn('%s timed out', self._node_id)
self._handler.on_timeout(self._host, self._port, self._node_id)
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload)
| logger = logging.getLogger(__name__)
def __init__(self, host, port, node_id, protocol, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._protocol = protocol
self._handler = handler
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
self._protocol.send(ControlPacket.ping(self._node_id, payload=payload))
def on_timeout(self):
self.logger.debug('%s timed out', self._node_id)
# Dummy packet to cleanly close transport
self._protocol._transport.write(
'{"closed":"true", "type":"closed", "service":"none", "version":"none"}'.encode())
self._protocol.close()
self._handler.on_timeout(self._host, self._port, self._node_id)
def stop(self):
self._pinger.stop()
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload) | identifier_body |
pinger.py | import asyncio
import functools
import logging
from aiohttp import request
from .packet import ControlPacket
PING_TIMEOUT = 10
PING_INTERVAL = 5
class Pinger:
"""
Pinger to send ping packets to an endpoint and inform if the timeout has occurred
"""
def __init__(self, handler, interval, timeout, loop=None, max_failures=5):
"""
Aysncio based pinger
:param handler: Pinger uses it to send a ping and inform when timeout occurs.
Must implement send_ping() and on_timeout() methods
:param int interval: time interval between ping after a pong
:param loop: Optional event loop
"""
self._handler = handler
self._interval = interval
self._timeout = timeout
self._loop = loop or asyncio.get_event_loop()
self._timer = None
self._failures = 0
self._max_failures = max_failures
self.logger = logging.getLogger()
@asyncio.coroutine
def send_ping(self, payload=None):
"""
Sends the ping after the interval specified when initializing
"""
yield from asyncio.sleep(self._interval)
self._handler.send_ping(payload=payload)
self._start_timer(payload=payload)
def pong_received(self, payload=None):
"""
Called when a pong is received. So the timer is cancelled
"""
if self._timer is not None:
self._timer.cancel()
self._failures = 0
asyncio.async(self.send_ping(payload=payload))
def _start_timer(self, payload=None):
self._timer = self._loop.call_later(self._timeout, functools.partial(self._on_timeout, payload=payload))
def stop(self):
if self._timer is not None:
self._timer.cancel()
def _on_timeout(self, payload=None):
if self._failures < self._max_failures:
self._failures += 1
asyncio.ensure_future(self.send_ping(payload=payload))
else:
self._handler.on_timeout()
class TCPPinger:
logger = logging.getLogger(__name__)
def __init__(self, host, port, node_id, protocol, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._protocol = protocol
self._handler = handler
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
self._protocol.send(ControlPacket.ping(self._node_id, payload=payload))
def on_timeout(self):
self.logger.debug('%s timed out', self._node_id)
# Dummy packet to cleanly close transport
self._protocol._transport.write(
'{"closed":"true", "type":"closed", "service":"none", "version":"none"}'.encode())
self._protocol.close()
self._handler.on_timeout(self._host, self._port, self._node_id)
def stop(self):
self._pinger.stop()
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload)
class HTTPPinger:
def __init__(self, host, port, node_id, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._handler = handler
self._url = 'http://{}:{}/ping'.format(host, port)
self.logger = logging.getLogger(__name__)
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
asyncio.ensure_future(self.ping_coroutine(payload=payload))
def ping_coroutine(self, payload=None):
try:
res = yield from request('get', self._url)
if res.status == 200:
|
except Exception:
self.logger.exception('Error while ping')
def stop(self):
self._pinger.stop()
def on_timeout(self):
self.logger.warn('%s timed out', self._node_id)
self._handler.on_timeout(self._host, self._port, self._node_id)
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload)
| self.pong_received(payload=payload)
res.close() | conditional_block |
pinger.py | import asyncio
import functools
import logging
from aiohttp import request
from .packet import ControlPacket
PING_TIMEOUT = 10
PING_INTERVAL = 5
class Pinger:
"""
Pinger to send ping packets to an endpoint and inform if the timeout has occurred
"""
def __init__(self, handler, interval, timeout, loop=None, max_failures=5):
"""
Aysncio based pinger
:param handler: Pinger uses it to send a ping and inform when timeout occurs.
Must implement send_ping() and on_timeout() methods
:param int interval: time interval between ping after a pong
:param loop: Optional event loop
"""
self._handler = handler
self._interval = interval
self._timeout = timeout
self._loop = loop or asyncio.get_event_loop()
self._timer = None
self._failures = 0
self._max_failures = max_failures
self.logger = logging.getLogger()
@asyncio.coroutine
def send_ping(self, payload=None):
"""
Sends the ping after the interval specified when initializing
"""
yield from asyncio.sleep(self._interval)
self._handler.send_ping(payload=payload)
self._start_timer(payload=payload)
def pong_received(self, payload=None):
"""
Called when a pong is received. So the timer is cancelled
"""
if self._timer is not None:
self._timer.cancel()
self._failures = 0
asyncio.async(self.send_ping(payload=payload))
def _start_timer(self, payload=None):
self._timer = self._loop.call_later(self._timeout, functools.partial(self._on_timeout, payload=payload))
def stop(self):
if self._timer is not None:
self._timer.cancel()
def _on_timeout(self, payload=None):
if self._failures < self._max_failures:
self._failures += 1
asyncio.ensure_future(self.send_ping(payload=payload))
else:
self._handler.on_timeout()
class TCPPinger:
logger = logging.getLogger(__name__)
def __init__(self, host, port, node_id, protocol, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._protocol = protocol
self._handler = handler
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
self._protocol.send(ControlPacket.ping(self._node_id, payload=payload))
def on_timeout(self):
self.logger.debug('%s timed out', self._node_id)
# Dummy packet to cleanly close transport
self._protocol._transport.write(
'{"closed":"true", "type":"closed", "service":"none", "version":"none"}'.encode())
self._protocol.close()
self._handler.on_timeout(self._host, self._port, self._node_id)
def stop(self):
self._pinger.stop()
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload)
class HTTPPinger:
def __init__(self, host, port, node_id, handler):
self._host = host
self._port = port
self._pinger = Pinger(self, PING_INTERVAL, PING_TIMEOUT)
self._node_id = node_id
self._handler = handler
self._url = 'http://{}:{}/ping'.format(host, port)
self.logger = logging.getLogger(__name__)
def ping(self, payload=None):
asyncio.ensure_future(self._pinger.send_ping(payload=payload))
def send_ping(self, payload=None):
asyncio.ensure_future(self.ping_coroutine(payload=payload))
def ping_coroutine(self, payload=None):
try:
res = yield from request('get', self._url)
if res.status == 200:
self.pong_received(payload=payload)
res.close() |
def stop(self):
self._pinger.stop()
def on_timeout(self):
self.logger.warn('%s timed out', self._node_id)
self._handler.on_timeout(self._host, self._port, self._node_id)
def pong_received(self, payload=None):
self._pinger.pong_received(payload=payload) | except Exception:
self.logger.exception('Error while ping') | random_line_split |
lib.rs | pub mod car;
use car::car_factory::CarFactory;
use car::car_type::{Body, Colour};
use car::Car;
pub struct | {
cars: Vec<Car>,
car_factory: CarFactory,
}
impl Parking {
pub fn new() -> Parking {
Parking {
cars: Vec::new(),
car_factory: CarFactory::new(),
}
}
pub fn add_car(
&mut self,
license_plate: &str,
parking_place_number: u8,
body: Body,
colour: Colour,
) {
self.cars.push(Car::new(
license_plate.to_string(),
parking_place_number,
self.car_factory.get_car_type_id(body, colour),
));
}
pub fn print(&mut self) {
for car in &self.cars {
car.print(self.car_factory.get_car_type(car.car_type_id).unwrap());
}
println!("\nNumber of cars: {}", self.cars.len());
self.car_factory.print();
}
}
| Parking | identifier_name |
lib.rs | pub mod car;
use car::car_factory::CarFactory;
use car::car_type::{Body, Colour};
use car::Car;
pub struct Parking {
cars: Vec<Car>,
car_factory: CarFactory,
}
impl Parking {
pub fn new() -> Parking {
Parking {
cars: Vec::new(),
car_factory: CarFactory::new(),
}
}
pub fn add_car(
&mut self, | ) {
self.cars.push(Car::new(
license_plate.to_string(),
parking_place_number,
self.car_factory.get_car_type_id(body, colour),
));
}
pub fn print(&mut self) {
for car in &self.cars {
car.print(self.car_factory.get_car_type(car.car_type_id).unwrap());
}
println!("\nNumber of cars: {}", self.cars.len());
self.car_factory.print();
}
} | license_plate: &str,
parking_place_number: u8,
body: Body,
colour: Colour, | random_line_split |
lib.rs | pub mod car;
use car::car_factory::CarFactory;
use car::car_type::{Body, Colour};
use car::Car;
pub struct Parking {
cars: Vec<Car>,
car_factory: CarFactory,
}
impl Parking {
pub fn new() -> Parking {
Parking {
cars: Vec::new(),
car_factory: CarFactory::new(),
}
}
pub fn add_car(
&mut self,
license_plate: &str,
parking_place_number: u8,
body: Body,
colour: Colour,
) {
self.cars.push(Car::new(
license_plate.to_string(),
parking_place_number,
self.car_factory.get_car_type_id(body, colour),
));
}
pub fn print(&mut self) |
}
| {
for car in &self.cars {
car.print(self.car_factory.get_car_type(car.car_type_id).unwrap());
}
println!("\nNumber of cars: {}", self.cars.len());
self.car_factory.print();
} | identifier_body |
index.d.ts | // Type definitions for knex-cleaner 1.3
// Project: https://github.com/steven-ferguson/knex-cleaner
// Definitions by: Karol Goraus <https://github.com/Szarlus>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped | * Choose between simply deleting all rows from table or truncating it completely. Default is 'truncate'
*/
mode?: 'truncate' | 'delete' | undefined;
/**
* Used to tell PostgresSQL to reset the ID counter, default is true
*/
restartIdentity?: boolean | undefined;
/**
* List of tables to ignore. Empty array by default.
*/
ignoreTables?: string[] | undefined;
}
export function clean(knex: Knex, options?: KnexCleanerOptions): Promise<void>; | // TypeScript Version: 4.1
import { Knex } from 'knex';
export interface KnexCleanerOptions {
/** | random_line_split |
glcommon.rs | use core::prelude::*;
use opengles::gl2;
use opengles::gl2::{GLuint, GLint};
use core::borrow::{Cow, IntoCow};
use collections::string::String;
pub type GLResult<T> = Result<T, MString>;
pub type MString = Cow<'static, String, str>;
fn get_gl_error_name(error: u32) -> &'static str {
match error {
gl2::NO_ERROR => "GL_NO_ERROR",
gl2::INVALID_ENUM => "GL_INVALID_ENUM",
gl2::INVALID_VALUE => "GL_INVALID_VALUE",
gl2::INVALID_OPERATION => "GL_INVALID_OPERATION",
gl2::INVALID_FRAMEBUFFER_OPERATION => "GL_INVALID_FRAMEBUFFER_OPERATION",
gl2::OUT_OF_MEMORY => "GL_OUT_OF_MEMORY",
_ => "unknown error!",
}
}
pub fn check_gl_error(name: &str) {
loop {
match gl2::get_error() {
0 => break,
error => logi!("after {} glError (0x{}): {}\n", name, error, get_gl_error_name(error)),
}
}
}
#[allow(dead_code)]
pub fn check_framebuffer_complete() -> bool {
let (err, result) = match gl2::check_framebuffer_status(gl2::FRAMEBUFFER) {
gl2::FRAMEBUFFER_COMPLETE => ("FRAMEBUFFER_COMPLETE", true),
gl2::FRAMEBUFFER_INCOMPLETE_ATTACHMENT => ("FRAMEBUFFER_INCOMPLETE_ATTACHMENT", false),
gl2::FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT => ("FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT", false),
gl2::FRAMEBUFFER_INCOMPLETE_DIMENSIONS => ("FRAMEBUFFER_INCOMPLETE_DIMENSIONS", false),
gl2::FRAMEBUFFER_UNSUPPORTED => ("FRAMEBUFFER_UNSUPPORTED", false),
_ => ("unknown error!", false)
};
debug_logi!("framebuffer status: {}", err);
result
}
pub fn load_shader(shader_type: gl2::GLenum, source: &str) -> GLResult<GLuint> {
let shader = gl2::create_shader(shader_type);
if shader != 0 {
gl2::shader_source(shader, [source.as_bytes()].as_slice());
gl2::compile_shader(shader);
let compiled = gl2::get_shader_iv(shader, gl2::COMPILE_STATUS);
if compiled != 0 {
Ok(shader)
} else {
let log = gl2::get_shader_info_log(shader).into_cow();
loge!("Could not compile shader {}:\n{}\n", shader_type, log);
gl2::delete_shader(shader);
Err(log)
}
} else {
Err(format!("Unknown error initializing shader type {}", shader_type).into_cow())
}
}
pub fn create_program(vertex_source: &str, fragment_source: &str) -> GLResult<GLuint> {
let vert_shader = try!(load_shader(gl2::VERTEX_SHADER, vertex_source));
let pixel_shader = try!(load_shader(gl2::FRAGMENT_SHADER, fragment_source));
let program = gl2::create_program();
if program == 0 {
return Err("Unknown error creating shader program".into_cow());
}
gl2::attach_shader(program, vert_shader);
check_gl_error("glAttachShader");
gl2::attach_shader(program, pixel_shader);
check_gl_error("glAttachShader");
gl2::link_program(program);
if gl2::get_program_iv(program, gl2::LINK_STATUS) as u8 == gl2::TRUE {
Ok(program)
} else {
let log = gl2::get_program_info_log(program).into_cow();
loge!("Could not link program: \n{}\n", log);
gl2::delete_program(program);
Err(log)
}
}
pub fn get_shader_handle(program: GLuint, name: &str) -> Option<GLuint> {
let handle = gl2::get_attrib_location(program, name);
check_gl_error(format!("get_shader_handle({})", name).as_slice());
if handle == -1 { None } else { Some(handle as GLuint) }
}
/// gl silently ignores writes to uniform -1, so this is not strictly necessary
pub fn get_uniform_handle_option(program: GLuint, name: &str) -> Option<GLint> {
let handle = gl2::get_uniform_location(program, name);
check_gl_error(format!("get_uniform_handle({})", name).as_slice());
if handle == -1 { None } else { Some(handle) }
}
pub trait Shader {
fn new(vertopt: MString, fragopt: MString) -> GLResult<Self>;
}
pub struct Defaults<Init> {
pub val: Init
}
pub trait FillDefaults<Init> {
type Unfilled;
fn fill_defaults(unfilled: <Self as FillDefaults<Init>>::Unfilled) -> Defaults<Init>;
}
pub trait UsingDefaults<Init> {
type Defaults;
//fn fill_defaults(Init) -> <Self as UsingDefaults<Init>>::Defaults;
fn maybe_init(Init) -> GLResult<Self>;
fn get_source(&self) -> &<Self as UsingDefaults<Init>>::Defaults;
}
pub trait UsingDefaultsSafe { }
macro_rules! glattrib_f32 (
// struct elements
($handle:expr, $count:expr, $item:ident, $elem:ident) => ({
unsafe {
// XXX probably also unsafe
let firstref = $item.get_unchecked(0); | gl2::glVertexAttribPointer($handle, $count, gl2::FLOAT, false as ::opengles::gl2::GLboolean,
mem::size_of_val(firstref) as i32,
// XXX this actually derefences firstref and is completely unsafe
// is there better way to do offsetof in rust? there ought to be
mem::transmute(&firstref.$elem));
}
check_gl_error(stringify!(vertex_attrib_pointer($elem)));
gl2::enable_vertex_attrib_array($handle);
check_gl_error("enable_vertex_array");
});
// densely-packed array
($handle:expr, $count:expr, $item:ident) => ({
unsafe {
let firstref = $item.get_unchecked(0) ;
gl2::glVertexAttribPointer($handle, $count, gl2::FLOAT,
false as ::opengles::gl2::GLboolean, 0, mem::transmute(firstref));
}
check_gl_error(stringify!(vertex_attrib_pointer($handle)));
gl2::enable_vertex_attrib_array($handle);
check_gl_error("enable_vertex_array");
});
);
macro_rules! gl_bindtexture (
($texunit:expr, $kind:expr, $texture:expr, $handle:expr) => ({
gl2::active_texture(gl2::TEXTURE0 + $texunit);
check_gl_error(stringify!(active_texture($texture)));
gl2::bind_texture($kind, $texture);
check_gl_error(stringify!(bind_texture($texture)));
gl2::uniform_1i($handle, $texunit);
check_gl_error(stringify!(uniform1i($texture)));
});
); | random_line_split |
|
glcommon.rs | use core::prelude::*;
use opengles::gl2;
use opengles::gl2::{GLuint, GLint};
use core::borrow::{Cow, IntoCow};
use collections::string::String;
pub type GLResult<T> = Result<T, MString>;
pub type MString = Cow<'static, String, str>;
fn get_gl_error_name(error: u32) -> &'static str {
match error {
gl2::NO_ERROR => "GL_NO_ERROR",
gl2::INVALID_ENUM => "GL_INVALID_ENUM",
gl2::INVALID_VALUE => "GL_INVALID_VALUE",
gl2::INVALID_OPERATION => "GL_INVALID_OPERATION",
gl2::INVALID_FRAMEBUFFER_OPERATION => "GL_INVALID_FRAMEBUFFER_OPERATION",
gl2::OUT_OF_MEMORY => "GL_OUT_OF_MEMORY",
_ => "unknown error!",
}
}
pub fn check_gl_error(name: &str) {
loop {
match gl2::get_error() {
0 => break,
error => logi!("after {} glError (0x{}): {}\n", name, error, get_gl_error_name(error)),
}
}
}
#[allow(dead_code)]
pub fn check_framebuffer_complete() -> bool {
let (err, result) = match gl2::check_framebuffer_status(gl2::FRAMEBUFFER) {
gl2::FRAMEBUFFER_COMPLETE => ("FRAMEBUFFER_COMPLETE", true),
gl2::FRAMEBUFFER_INCOMPLETE_ATTACHMENT => ("FRAMEBUFFER_INCOMPLETE_ATTACHMENT", false),
gl2::FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT => ("FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT", false),
gl2::FRAMEBUFFER_INCOMPLETE_DIMENSIONS => ("FRAMEBUFFER_INCOMPLETE_DIMENSIONS", false),
gl2::FRAMEBUFFER_UNSUPPORTED => ("FRAMEBUFFER_UNSUPPORTED", false),
_ => ("unknown error!", false)
};
debug_logi!("framebuffer status: {}", err);
result
}
pub fn load_shader(shader_type: gl2::GLenum, source: &str) -> GLResult<GLuint> {
let shader = gl2::create_shader(shader_type);
if shader != 0 {
gl2::shader_source(shader, [source.as_bytes()].as_slice());
gl2::compile_shader(shader);
let compiled = gl2::get_shader_iv(shader, gl2::COMPILE_STATUS);
if compiled != 0 {
Ok(shader)
} else {
let log = gl2::get_shader_info_log(shader).into_cow();
loge!("Could not compile shader {}:\n{}\n", shader_type, log);
gl2::delete_shader(shader);
Err(log)
}
} else {
Err(format!("Unknown error initializing shader type {}", shader_type).into_cow())
}
}
pub fn create_program(vertex_source: &str, fragment_source: &str) -> GLResult<GLuint> {
let vert_shader = try!(load_shader(gl2::VERTEX_SHADER, vertex_source));
let pixel_shader = try!(load_shader(gl2::FRAGMENT_SHADER, fragment_source));
let program = gl2::create_program();
if program == 0 {
return Err("Unknown error creating shader program".into_cow());
}
gl2::attach_shader(program, vert_shader);
check_gl_error("glAttachShader");
gl2::attach_shader(program, pixel_shader);
check_gl_error("glAttachShader");
gl2::link_program(program);
if gl2::get_program_iv(program, gl2::LINK_STATUS) as u8 == gl2::TRUE {
Ok(program)
} else {
let log = gl2::get_program_info_log(program).into_cow();
loge!("Could not link program: \n{}\n", log);
gl2::delete_program(program);
Err(log)
}
}
pub fn get_shader_handle(program: GLuint, name: &str) -> Option<GLuint> {
let handle = gl2::get_attrib_location(program, name);
check_gl_error(format!("get_shader_handle({})", name).as_slice());
if handle == -1 { None } else { Some(handle as GLuint) }
}
/// gl silently ignores writes to uniform -1, so this is not strictly necessary
pub fn get_uniform_handle_option(program: GLuint, name: &str) -> Option<GLint> {
let handle = gl2::get_uniform_location(program, name);
check_gl_error(format!("get_uniform_handle({})", name).as_slice());
if handle == -1 { None } else { Some(handle) }
}
pub trait Shader {
fn new(vertopt: MString, fragopt: MString) -> GLResult<Self>;
}
pub struct | <Init> {
pub val: Init
}
pub trait FillDefaults<Init> {
type Unfilled;
fn fill_defaults(unfilled: <Self as FillDefaults<Init>>::Unfilled) -> Defaults<Init>;
}
pub trait UsingDefaults<Init> {
type Defaults;
//fn fill_defaults(Init) -> <Self as UsingDefaults<Init>>::Defaults;
fn maybe_init(Init) -> GLResult<Self>;
fn get_source(&self) -> &<Self as UsingDefaults<Init>>::Defaults;
}
pub trait UsingDefaultsSafe { }
macro_rules! glattrib_f32 (
// struct elements
($handle:expr, $count:expr, $item:ident, $elem:ident) => ({
unsafe {
// XXX probably also unsafe
let firstref = $item.get_unchecked(0);
gl2::glVertexAttribPointer($handle, $count, gl2::FLOAT, false as ::opengles::gl2::GLboolean,
mem::size_of_val(firstref) as i32,
// XXX this actually derefences firstref and is completely unsafe
// is there better way to do offsetof in rust? there ought to be
mem::transmute(&firstref.$elem));
}
check_gl_error(stringify!(vertex_attrib_pointer($elem)));
gl2::enable_vertex_attrib_array($handle);
check_gl_error("enable_vertex_array");
});
// densely-packed array
($handle:expr, $count:expr, $item:ident) => ({
unsafe {
let firstref = $item.get_unchecked(0) ;
gl2::glVertexAttribPointer($handle, $count, gl2::FLOAT,
false as ::opengles::gl2::GLboolean, 0, mem::transmute(firstref));
}
check_gl_error(stringify!(vertex_attrib_pointer($handle)));
gl2::enable_vertex_attrib_array($handle);
check_gl_error("enable_vertex_array");
});
);
macro_rules! gl_bindtexture (
($texunit:expr, $kind:expr, $texture:expr, $handle:expr) => ({
gl2::active_texture(gl2::TEXTURE0 + $texunit);
check_gl_error(stringify!(active_texture($texture)));
gl2::bind_texture($kind, $texture);
check_gl_error(stringify!(bind_texture($texture)));
gl2::uniform_1i($handle, $texunit);
check_gl_error(stringify!(uniform1i($texture)));
});
);
| Defaults | identifier_name |
glcommon.rs | use core::prelude::*;
use opengles::gl2;
use opengles::gl2::{GLuint, GLint};
use core::borrow::{Cow, IntoCow};
use collections::string::String;
pub type GLResult<T> = Result<T, MString>;
pub type MString = Cow<'static, String, str>;
fn get_gl_error_name(error: u32) -> &'static str {
match error {
gl2::NO_ERROR => "GL_NO_ERROR",
gl2::INVALID_ENUM => "GL_INVALID_ENUM",
gl2::INVALID_VALUE => "GL_INVALID_VALUE",
gl2::INVALID_OPERATION => "GL_INVALID_OPERATION",
gl2::INVALID_FRAMEBUFFER_OPERATION => "GL_INVALID_FRAMEBUFFER_OPERATION",
gl2::OUT_OF_MEMORY => "GL_OUT_OF_MEMORY",
_ => "unknown error!",
}
}
pub fn check_gl_error(name: &str) {
loop {
match gl2::get_error() {
0 => break,
error => logi!("after {} glError (0x{}): {}\n", name, error, get_gl_error_name(error)),
}
}
}
#[allow(dead_code)]
pub fn check_framebuffer_complete() -> bool |
pub fn load_shader(shader_type: gl2::GLenum, source: &str) -> GLResult<GLuint> {
let shader = gl2::create_shader(shader_type);
if shader != 0 {
gl2::shader_source(shader, [source.as_bytes()].as_slice());
gl2::compile_shader(shader);
let compiled = gl2::get_shader_iv(shader, gl2::COMPILE_STATUS);
if compiled != 0 {
Ok(shader)
} else {
let log = gl2::get_shader_info_log(shader).into_cow();
loge!("Could not compile shader {}:\n{}\n", shader_type, log);
gl2::delete_shader(shader);
Err(log)
}
} else {
Err(format!("Unknown error initializing shader type {}", shader_type).into_cow())
}
}
pub fn create_program(vertex_source: &str, fragment_source: &str) -> GLResult<GLuint> {
let vert_shader = try!(load_shader(gl2::VERTEX_SHADER, vertex_source));
let pixel_shader = try!(load_shader(gl2::FRAGMENT_SHADER, fragment_source));
let program = gl2::create_program();
if program == 0 {
return Err("Unknown error creating shader program".into_cow());
}
gl2::attach_shader(program, vert_shader);
check_gl_error("glAttachShader");
gl2::attach_shader(program, pixel_shader);
check_gl_error("glAttachShader");
gl2::link_program(program);
if gl2::get_program_iv(program, gl2::LINK_STATUS) as u8 == gl2::TRUE {
Ok(program)
} else {
let log = gl2::get_program_info_log(program).into_cow();
loge!("Could not link program: \n{}\n", log);
gl2::delete_program(program);
Err(log)
}
}
pub fn get_shader_handle(program: GLuint, name: &str) -> Option<GLuint> {
let handle = gl2::get_attrib_location(program, name);
check_gl_error(format!("get_shader_handle({})", name).as_slice());
if handle == -1 { None } else { Some(handle as GLuint) }
}
/// gl silently ignores writes to uniform -1, so this is not strictly necessary
pub fn get_uniform_handle_option(program: GLuint, name: &str) -> Option<GLint> {
let handle = gl2::get_uniform_location(program, name);
check_gl_error(format!("get_uniform_handle({})", name).as_slice());
if handle == -1 { None } else { Some(handle) }
}
pub trait Shader {
fn new(vertopt: MString, fragopt: MString) -> GLResult<Self>;
}
pub struct Defaults<Init> {
pub val: Init
}
pub trait FillDefaults<Init> {
type Unfilled;
fn fill_defaults(unfilled: <Self as FillDefaults<Init>>::Unfilled) -> Defaults<Init>;
}
pub trait UsingDefaults<Init> {
type Defaults;
//fn fill_defaults(Init) -> <Self as UsingDefaults<Init>>::Defaults;
fn maybe_init(Init) -> GLResult<Self>;
fn get_source(&self) -> &<Self as UsingDefaults<Init>>::Defaults;
}
pub trait UsingDefaultsSafe { }
macro_rules! glattrib_f32 (
// struct elements
($handle:expr, $count:expr, $item:ident, $elem:ident) => ({
unsafe {
// XXX probably also unsafe
let firstref = $item.get_unchecked(0);
gl2::glVertexAttribPointer($handle, $count, gl2::FLOAT, false as ::opengles::gl2::GLboolean,
mem::size_of_val(firstref) as i32,
// XXX this actually derefences firstref and is completely unsafe
// is there better way to do offsetof in rust? there ought to be
mem::transmute(&firstref.$elem));
}
check_gl_error(stringify!(vertex_attrib_pointer($elem)));
gl2::enable_vertex_attrib_array($handle);
check_gl_error("enable_vertex_array");
});
// densely-packed array
($handle:expr, $count:expr, $item:ident) => ({
unsafe {
let firstref = $item.get_unchecked(0) ;
gl2::glVertexAttribPointer($handle, $count, gl2::FLOAT,
false as ::opengles::gl2::GLboolean, 0, mem::transmute(firstref));
}
check_gl_error(stringify!(vertex_attrib_pointer($handle)));
gl2::enable_vertex_attrib_array($handle);
check_gl_error("enable_vertex_array");
});
);
macro_rules! gl_bindtexture (
($texunit:expr, $kind:expr, $texture:expr, $handle:expr) => ({
gl2::active_texture(gl2::TEXTURE0 + $texunit);
check_gl_error(stringify!(active_texture($texture)));
gl2::bind_texture($kind, $texture);
check_gl_error(stringify!(bind_texture($texture)));
gl2::uniform_1i($handle, $texunit);
check_gl_error(stringify!(uniform1i($texture)));
});
);
| {
let (err, result) = match gl2::check_framebuffer_status(gl2::FRAMEBUFFER) {
gl2::FRAMEBUFFER_COMPLETE => ("FRAMEBUFFER_COMPLETE", true),
gl2::FRAMEBUFFER_INCOMPLETE_ATTACHMENT => ("FRAMEBUFFER_INCOMPLETE_ATTACHMENT", false),
gl2::FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT => ("FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT", false),
gl2::FRAMEBUFFER_INCOMPLETE_DIMENSIONS => ("FRAMEBUFFER_INCOMPLETE_DIMENSIONS", false),
gl2::FRAMEBUFFER_UNSUPPORTED => ("FRAMEBUFFER_UNSUPPORTED", false),
_ => ("unknown error!", false)
};
debug_logi!("framebuffer status: {}", err);
result
} | identifier_body |
session.rs | use alloc::boxed::Box;
use collections::string::{String, ToString};
use collections::vec::Vec;
use scheduler;
use schemes::KScheme;
use schemes::{Resource, Url, VecResource};
/// A session
pub struct Session {
/// The scheme items
pub items: Vec<Box<KScheme>>,
}
impl Session {
/// Create new session
pub fn new() -> Box<Self> {
box Session {
items: Vec::new(),
}
}
pub unsafe fn | (&mut self, irq: u8) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_irq(irq);
}
scheduler::end_no_ints(reenable);
}
pub unsafe fn on_poll(&mut self) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_poll();
}
scheduler::end_no_ints(reenable);
}
/// Open a new resource
pub fn open(&mut self, url: &Url, flags: usize) -> Option<Box<Resource>> {
if url.scheme().len() == 0 {
let mut list = String::new();
for item in self.items.iter() {
let scheme = item.scheme();
if !scheme.is_empty() {
if !list.is_empty() {
list = list + "\n" + scheme;
} else {
list = scheme.to_string();
}
}
}
Some(box VecResource::new(Url::new(), list.into_bytes()))
} else {
for mut item in self.items.iter_mut() {
if item.scheme() == url.scheme() {
return item.open(url, flags);
}
}
None
}
}
}
| on_irq | identifier_name |
session.rs | use alloc::boxed::Box;
use collections::string::{String, ToString};
use collections::vec::Vec;
use scheduler;
use schemes::KScheme;
use schemes::{Resource, Url, VecResource};
/// A session
pub struct Session {
/// The scheme items
pub items: Vec<Box<KScheme>>,
}
impl Session {
/// Create new session | pub fn new() -> Box<Self> {
box Session {
items: Vec::new(),
}
}
pub unsafe fn on_irq(&mut self, irq: u8) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_irq(irq);
}
scheduler::end_no_ints(reenable);
}
pub unsafe fn on_poll(&mut self) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_poll();
}
scheduler::end_no_ints(reenable);
}
/// Open a new resource
pub fn open(&mut self, url: &Url, flags: usize) -> Option<Box<Resource>> {
if url.scheme().len() == 0 {
let mut list = String::new();
for item in self.items.iter() {
let scheme = item.scheme();
if !scheme.is_empty() {
if !list.is_empty() {
list = list + "\n" + scheme;
} else {
list = scheme.to_string();
}
}
}
Some(box VecResource::new(Url::new(), list.into_bytes()))
} else {
for mut item in self.items.iter_mut() {
if item.scheme() == url.scheme() {
return item.open(url, flags);
}
}
None
}
}
} | random_line_split |
|
session.rs | use alloc::boxed::Box;
use collections::string::{String, ToString};
use collections::vec::Vec;
use scheduler;
use schemes::KScheme;
use schemes::{Resource, Url, VecResource};
/// A session
pub struct Session {
/// The scheme items
pub items: Vec<Box<KScheme>>,
}
impl Session {
/// Create new session
pub fn new() -> Box<Self> {
box Session {
items: Vec::new(),
}
}
pub unsafe fn on_irq(&mut self, irq: u8) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_irq(irq);
}
scheduler::end_no_ints(reenable);
}
pub unsafe fn on_poll(&mut self) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_poll();
}
scheduler::end_no_ints(reenable);
}
/// Open a new resource
pub fn open(&mut self, url: &Url, flags: usize) -> Option<Box<Resource>> |
}
| {
if url.scheme().len() == 0 {
let mut list = String::new();
for item in self.items.iter() {
let scheme = item.scheme();
if !scheme.is_empty() {
if !list.is_empty() {
list = list + "\n" + scheme;
} else {
list = scheme.to_string();
}
}
}
Some(box VecResource::new(Url::new(), list.into_bytes()))
} else {
for mut item in self.items.iter_mut() {
if item.scheme() == url.scheme() {
return item.open(url, flags);
}
}
None
}
} | identifier_body |
session.rs | use alloc::boxed::Box;
use collections::string::{String, ToString};
use collections::vec::Vec;
use scheduler;
use schemes::KScheme;
use schemes::{Resource, Url, VecResource};
/// A session
pub struct Session {
/// The scheme items
pub items: Vec<Box<KScheme>>,
}
impl Session {
/// Create new session
pub fn new() -> Box<Self> {
box Session {
items: Vec::new(),
}
}
pub unsafe fn on_irq(&mut self, irq: u8) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_irq(irq);
}
scheduler::end_no_ints(reenable);
}
pub unsafe fn on_poll(&mut self) {
let reenable = scheduler::start_no_ints();
for mut item in self.items.iter_mut() {
item.on_poll();
}
scheduler::end_no_ints(reenable);
}
/// Open a new resource
pub fn open(&mut self, url: &Url, flags: usize) -> Option<Box<Resource>> {
if url.scheme().len() == 0 | else {
for mut item in self.items.iter_mut() {
if item.scheme() == url.scheme() {
return item.open(url, flags);
}
}
None
}
}
}
| {
let mut list = String::new();
for item in self.items.iter() {
let scheme = item.scheme();
if !scheme.is_empty() {
if !list.is_empty() {
list = list + "\n" + scheme;
} else {
list = scheme.to_string();
}
}
}
Some(box VecResource::new(Url::new(), list.into_bytes()))
} | conditional_block |
issue-9446.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
struct | (String);
impl Wrapper {
pub fn new(wrapped: String) -> Wrapper {
Wrapper(wrapped)
}
pub fn say_hi(&self) {
let Wrapper(ref s) = *self;
println!("hello {}", *s);
}
}
impl Drop for Wrapper {
fn drop(&mut self) {}
}
pub fn main() {
{
// This runs without complaint.
let x = Wrapper::new("Bob".to_string());
x.say_hi();
}
{
// This fails to compile, circa 0.8-89-gc635fba.
// error: internal compiler error: drop_ty_immediate: non-box ty
Wrapper::new("Bob".to_string()).say_hi();
}
}
| Wrapper | identifier_name |
issue-9446.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
struct Wrapper(String);
impl Wrapper {
pub fn new(wrapped: String) -> Wrapper {
Wrapper(wrapped)
}
pub fn say_hi(&self) { | impl Drop for Wrapper {
fn drop(&mut self) {}
}
pub fn main() {
{
// This runs without complaint.
let x = Wrapper::new("Bob".to_string());
x.say_hi();
}
{
// This fails to compile, circa 0.8-89-gc635fba.
// error: internal compiler error: drop_ty_immediate: non-box ty
Wrapper::new("Bob".to_string()).say_hi();
}
} | let Wrapper(ref s) = *self;
println!("hello {}", *s);
}
}
| random_line_split |
issue-9446.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
struct Wrapper(String);
impl Wrapper {
pub fn new(wrapped: String) -> Wrapper {
Wrapper(wrapped)
}
pub fn say_hi(&self) {
let Wrapper(ref s) = *self;
println!("hello {}", *s);
}
}
impl Drop for Wrapper {
fn drop(&mut self) |
}
pub fn main() {
{
// This runs without complaint.
let x = Wrapper::new("Bob".to_string());
x.say_hi();
}
{
// This fails to compile, circa 0.8-89-gc635fba.
// error: internal compiler error: drop_ty_immediate: non-box ty
Wrapper::new("Bob".to_string()).say_hi();
}
}
| {} | identifier_body |
dragAction.ts | import { GraphType, rootNodeId } from '../../Graph'
import { cutGraph, dropGraph } from '../../Graph/helper/GraphHelper'
import { uimap } from '../../Graph/helper/uimap'
import { DragStartType } from '../types'
import { ActionContextType } from '../../context.type'
import * as check from 'check-types'
const dragp = [ '$dragdrop', 'drag' ]
export const dragAction =
( { state
, input
, output
} : ActionContextType
) => {
const drag: DragStartType =
Object.assign ( {}, input.drag )
if ( drag.ownerType === 'library' ) {
drag.dgraph = state.get ( [ 'data', 'component', drag.componentId, 'graph' ] )
console.log ( state.get ( [ 'data', 'component', drag.componentId ] ) )
}
else {
let graph = state.get ( [ drag.ownerType, 'graph' ] )
const otype = drag.ownerType === 'project' ? 'scene' : 'project'
drag.dgraph = cutGraph
( graph |
drag.uigraph = uimap ( drag.dgraph )
state.set ( dragp, drag )
} | , drag.nodeId
)
drag.rgraph = dropGraph ( graph, drag.nodeId )
} | random_line_split |
dragAction.ts | import { GraphType, rootNodeId } from '../../Graph'
import { cutGraph, dropGraph } from '../../Graph/helper/GraphHelper'
import { uimap } from '../../Graph/helper/uimap'
import { DragStartType } from '../types'
import { ActionContextType } from '../../context.type'
import * as check from 'check-types'
const dragp = [ '$dragdrop', 'drag' ]
export const dragAction =
( { state
, input
, output
} : ActionContextType
) => {
const drag: DragStartType =
Object.assign ( {}, input.drag )
if ( drag.ownerType === 'library' ) |
else {
let graph = state.get ( [ drag.ownerType, 'graph' ] )
const otype = drag.ownerType === 'project' ? 'scene' : 'project'
drag.dgraph = cutGraph
( graph
, drag.nodeId
)
drag.rgraph = dropGraph ( graph, drag.nodeId )
}
drag.uigraph = uimap ( drag.dgraph )
state.set ( dragp, drag )
}
| {
drag.dgraph = state.get ( [ 'data', 'component', drag.componentId, 'graph' ] )
console.log ( state.get ( [ 'data', 'component', drag.componentId ] ) )
} | conditional_block |
app.ts | /// <reference path="../typings/_custom.d.ts" />
/*
* TODO: use the real App component
* change `app-simple.js` to `app.js` in src/public/index.html
*/
/*
* Angular 2 decorators and servces
*/
import {Directive, Component, View} from 'angular2/angular2';
import {RouteConfig, Router} from 'angular2/router';
import {Http} from 'angular2/http';
/*
* Angular Directives
*/
import {coreDirectives} from 'angular2/directives';
import {routerDirectives} from 'angular2/router';
import {formDirectives} from 'angular2/forms';
/*
* App Component
* Top Level Component
*/
@Component({
selector: 'app'
})
@View({
// needed in order to tell Angular's compiler what's in the template
directives: [ coreDirectives, formDirectives, routerDirectives ],
template: `
<style>
.title { font-family: Arial, Helvetica, sans-serif; }
</style>
<header>
<h1 class="title">Hello {{ name }}</h1>
</header>
<main>
Your Content Here
<pre>data = {{ data | json }}</pre>
</main>
<footer>
WebPack Angular 2 Starter by <a href="https://twitter.com/AngularClass">@AngularClass</a>
</footer>
`
})
export class App {
name: string;
data: Array<any> = []; // default data
constructor(public http: Http) {
this.name = 'Angular 2';
this.getData();
}
| () {
// npm install express connect-history-api-fallback morgan body-parser
// npm run express
this.http.
get('/api/todos', {
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
}).
toRx().
map(res => res.json()).
subscribe(
// onNext value
data => this.serverData(data),
// onError
err => this.errorMessage(err)
);//end http
}
serverData(data) {
console.log('data', data);
this.data = data;
}
errorMessage(err) {
if (err && (/Unexpected token/).test(err.message)) {
console.info(`${'\n'
} // You must run these commands for the Http API to work ${'\n'
} npm install express connect-history-api-fallback morgan body-parser ${'\n'
} npm run express
`);
}//end err.message
}
}
| getData | identifier_name |
app.ts | /// <reference path="../typings/_custom.d.ts" />
/*
* TODO: use the real App component
* change `app-simple.js` to `app.js` in src/public/index.html
*/
/*
* Angular 2 decorators and servces
*/
import {Directive, Component, View} from 'angular2/angular2';
import {RouteConfig, Router} from 'angular2/router';
import {Http} from 'angular2/http';
/*
* Angular Directives
*/
import {coreDirectives} from 'angular2/directives';
import {routerDirectives} from 'angular2/router';
import {formDirectives} from 'angular2/forms';
/*
* App Component
* Top Level Component
*/
@Component({
selector: 'app'
})
@View({
// needed in order to tell Angular's compiler what's in the template
directives: [ coreDirectives, formDirectives, routerDirectives ],
template: `
<style>
.title { font-family: Arial, Helvetica, sans-serif; }
</style>
<header>
<h1 class="title">Hello {{ name }}</h1>
</header>
<main>
Your Content Here
<pre>data = {{ data | json }}</pre>
</main>
<footer>
WebPack Angular 2 Starter by <a href="https://twitter.com/AngularClass">@AngularClass</a>
</footer>
`
})
export class App {
name: string;
data: Array<any> = []; // default data
constructor(public http: Http) {
this.name = 'Angular 2';
this.getData();
}
getData() {
// npm install express connect-history-api-fallback morgan body-parser
// npm run express
this.http.
get('/api/todos', {
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
}).
toRx().
map(res => res.json()).
subscribe(
// onNext value
data => this.serverData(data),
// onError
err => this.errorMessage(err)
);//end http
}
serverData(data) {
console.log('data', data);
this.data = data;
}
errorMessage(err) {
if (err && (/Unexpected token/).test(err.message)) |
}
}
| {
console.info(`${'\n'
} // You must run these commands for the Http API to work ${'\n'
} npm install express connect-history-api-fallback morgan body-parser ${'\n'
} npm run express
`);
}//end err.message | conditional_block |
app.ts | /// <reference path="../typings/_custom.d.ts" />
/*
* TODO: use the real App component
* change `app-simple.js` to `app.js` in src/public/index.html
*/
/*
* Angular 2 decorators and servces
*/
import {Directive, Component, View} from 'angular2/angular2';
import {RouteConfig, Router} from 'angular2/router';
import {Http} from 'angular2/http';
/*
* Angular Directives
*/
import {coreDirectives} from 'angular2/directives';
import {routerDirectives} from 'angular2/router';
import {formDirectives} from 'angular2/forms';
/*
* App Component
* Top Level Component
*/
@Component({
selector: 'app'
})
@View({
// needed in order to tell Angular's compiler what's in the template
directives: [ coreDirectives, formDirectives, routerDirectives ],
template: `
<style>
.title { font-family: Arial, Helvetica, sans-serif; }
</style>
<header>
<h1 class="title">Hello {{ name }}</h1>
</header>
<main>
Your Content Here
<pre>data = {{ data | json }}</pre>
</main>
<footer>
WebPack Angular 2 Starter by <a href="https://twitter.com/AngularClass">@AngularClass</a>
</footer>
`
})
export class App {
name: string;
data: Array<any> = []; // default data
constructor(public http: Http) {
this.name = 'Angular 2';
this.getData();
}
getData() {
// npm install express connect-history-api-fallback morgan body-parser
// npm run express
this.http.
get('/api/todos', {
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
}).
toRx().
map(res => res.json()).
subscribe(
// onNext value
data => this.serverData(data),
// onError
err => this.errorMessage(err)
);//end http
}
serverData(data) {
console.log('data', data);
this.data = data;
}
errorMessage(err) {
if (err && (/Unexpected token/).test(err.message)) {
console.info(`${'\n'
} // You must run these commands for the Http API to work ${'\n'
} npm install express connect-history-api-fallback morgan body-parser ${'\n'
} npm run express
`);
}//end err.message | } | } | random_line_split |
app.ts | /// <reference path="../typings/_custom.d.ts" />
/*
* TODO: use the real App component
* change `app-simple.js` to `app.js` in src/public/index.html
*/
/*
* Angular 2 decorators and servces
*/
import {Directive, Component, View} from 'angular2/angular2';
import {RouteConfig, Router} from 'angular2/router';
import {Http} from 'angular2/http';
/*
* Angular Directives
*/
import {coreDirectives} from 'angular2/directives';
import {routerDirectives} from 'angular2/router';
import {formDirectives} from 'angular2/forms';
/*
* App Component
* Top Level Component
*/
@Component({
selector: 'app'
})
@View({
// needed in order to tell Angular's compiler what's in the template
directives: [ coreDirectives, formDirectives, routerDirectives ],
template: `
<style>
.title { font-family: Arial, Helvetica, sans-serif; }
</style>
<header>
<h1 class="title">Hello {{ name }}</h1>
</header>
<main>
Your Content Here
<pre>data = {{ data | json }}</pre>
</main>
<footer>
WebPack Angular 2 Starter by <a href="https://twitter.com/AngularClass">@AngularClass</a>
</footer>
`
})
export class App {
name: string;
data: Array<any> = []; // default data
constructor(public http: Http) |
getData() {
// npm install express connect-history-api-fallback morgan body-parser
// npm run express
this.http.
get('/api/todos', {
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
}).
toRx().
map(res => res.json()).
subscribe(
// onNext value
data => this.serverData(data),
// onError
err => this.errorMessage(err)
);//end http
}
serverData(data) {
console.log('data', data);
this.data = data;
}
errorMessage(err) {
if (err && (/Unexpected token/).test(err.message)) {
console.info(`${'\n'
} // You must run these commands for the Http API to work ${'\n'
} npm install express connect-history-api-fallback morgan body-parser ${'\n'
} npm run express
`);
}//end err.message
}
}
| {
this.name = 'Angular 2';
this.getData();
} | identifier_body |
jsonmaking.py | import json, random, time, sys
"""
Creating a random JSON object based on lists of info and random numbers
to assign the index
"""
##Directory
input_file = "test.json"
###Success message takes the file_name and operation type (ie. written, closed)
def process_message(outcome, file_name, operation_type):
print "*******%s File: %s %s *******" % (outcome, file_name, operation_type)
##Open file
try:
open_file=open(input_file, 'w')
print "File opened"
except:
print "Error opening "+input_file
##Random chooser-random number picker function to be used over and over, but needs to be created before called
##To keep everything clean it's listed before the others funtions so that they maybe listed in order of the dictionary keys
def random_chooser(start,end):
return random.randrange(start,end)
##Lists of info
doctors_name=["Dr_K", "Dr. Pepper", "Dr. Lector", "Dr. Seus", "Dr Dre", "Dr. Phill", "Dr. Glass"]
special_notes_list=["No more doctors available for the weekend", "All offices closed for Labor Day", "Offices closed till Monday for Christmas",
"No Dr. on call Saturdays", "No Dr. on call Fridays", "No Dr. on call Mondays", "No Dr. on call Wednesdays" ,"No Dr. on call Tuesdays",
"Office closed for snow"]
dates=["1/17/2013","12/02/2011", "11/08/2012", "4/1/2010", "5/23/2011","1/15/2013","12/02/2010", "12/08/2012", "6/1/2010", "7/23/2011"]
first_name=["Bob", "Peter", "Jim", "Gerry", "Jean", "Robert", "Susan", "Mary", "Jo", "Brian"]
last_name=["Cameron", "Bender", "Neutron", "Simmons", "Jackson", "Smith", "Gardner", "Crocker","Black", "White"]
from_place=["Fort Worth","Plano","Houston","Little Rock","Detroit","Memphis", "Dallas","Arlington","Jenks","Chicago","Tulsa", "Boise", "Desmoins", "Minnieapolis", "St. Louis"]
check_list=["5647","7610","1230","3210","6543","9874","1324","3215","5897","6546","5968","6540"]
content_list=["Nice to see you!", "This is a content message", "This is another content message" ,"This is a test message to verify that the content is coming through",
"This is the content you are looking for","Content is magically here","Some content","Test content for your viewing pleasure",
"This is a test of the call_manager content system","Testing...testing...1...2...3!","Testing...testing...4...5...6!"]
##Keys for the dictionary
messages_info_keys = ["date_and_time", "caller_name", "from", "call_back_number", "call_back_ext", "check_number", "content"]
##Random pick of date from list dates
def date_picker():
picked_date=random_chooser(1,len(dates))
new_date=dates[picked_date]
return new_date
##creates a full name from lists first_name and last_name
def pick_your_name():
first=random_chooser(1,len(first_name))
last=random_chooser(1,10)
combo_name =first_name[first]+" "+last_name[last]
return combo_name
##Random pick of location from list from_place
def random_place():
picked_place=random_chooser(1,len(from_place))
place=from_place[picked_place]
return place
##Random number generator with randint from the random module
def | (n):
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
##combines a group of random numbers to resemble a phone number
def random_phone_number():
return "%s-%s-%s" %(str(random_number_maker(3)), str(random_number_maker(3)), str(random_number_maker(4)))
##call_back_ext picker, uses random number to generate number
def random_ext():
extension_maker=random_chooser(111,999)
return extension_maker
## not needed using random phone number generator
#call_back=[1,65,3,5,7,88]
##Random check number picker from list check_list
def check():
check_picker=random_chooser(1,10)
check=check_list[check_picker]
#=[1,2,3,5,6,8,98]
return check
##Random content picker from list content_list
def content():
content_picker=random_chooser(1,len(content_list))
content=content_list[content_picker]
return content
##Generates a random number of message items
def messages_list_random_maker():
x=0
lister_maker=[]
while(x<random_chooser(1,20)):
messages_info_values = [date_picker(),pick_your_name(),random_place(),random_phone_number(),random_ext(),check(), content()]
messages_info_list = dict(zip(messages_info_keys, messages_info_values))
lister_maker.append(messages_info_list)
x=x+1
return lister_maker
##dictionaries of info
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
messages_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
##Main area that puts everything together
doctors_list=[]
for name in doctors_name:
random_number=random.randrange(0,10)
special_notes_random_number=random.randrange(0,len(special_notes_list))
special_notes=special_notes_list[special_notes_random_number]
acct_number=random_number_maker(4)
ticket_number = abs(random_number-10)+1
duration_of_call = abs(random_number-10)+1
listerine = messages_list_random_maker()
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
doctors_list.append({"doctors_name":name, "special_notes":special_notes, "acct_number":acct_number,
"ticket_number":ticket_number, "duration_of_call":duration_of_call, "call_status": "ringing", "account_info": account_info_dict,
"messages":listerine})
##Dumps the dict to a jason object
jasoner=json.dumps(doctors_list)
#print jasoner
##Count up percentage of completion
for i in range(100):
print "\r", str(i)+"%"
time.sleep(.025)
print "\r"
##Write file
try:
open_file.write(jasoner)
process_message("SUCCESS", input_file, "Written")
except:
process_message("FAILURE" , input_file, "Not Written")
##Close file
try:
open_file.close()
process_message("SUCCESS", input_file, "Closed")
except:
process_message("FAILURE" , input_file, "Not Closed")
| random_number_maker | identifier_name |
jsonmaking.py | import json, random, time, sys
"""
Creating a random JSON object based on lists of info and random numbers
to assign the index
"""
##Directory
input_file = "test.json"
###Success message takes the file_name and operation type (ie. written, closed)
def process_message(outcome, file_name, operation_type):
print "*******%s File: %s %s *******" % (outcome, file_name, operation_type)
##Open file
try:
open_file=open(input_file, 'w')
print "File opened"
except:
print "Error opening "+input_file
##Random chooser-random number picker function to be used over and over, but needs to be created before called
##To keep everything clean it's listed before the others funtions so that they maybe listed in order of the dictionary keys
def random_chooser(start,end):
|
##Lists of info
doctors_name=["Dr_K", "Dr. Pepper", "Dr. Lector", "Dr. Seus", "Dr Dre", "Dr. Phill", "Dr. Glass"]
special_notes_list=["No more doctors available for the weekend", "All offices closed for Labor Day", "Offices closed till Monday for Christmas",
"No Dr. on call Saturdays", "No Dr. on call Fridays", "No Dr. on call Mondays", "No Dr. on call Wednesdays" ,"No Dr. on call Tuesdays",
"Office closed for snow"]
dates=["1/17/2013","12/02/2011", "11/08/2012", "4/1/2010", "5/23/2011","1/15/2013","12/02/2010", "12/08/2012", "6/1/2010", "7/23/2011"]
first_name=["Bob", "Peter", "Jim", "Gerry", "Jean", "Robert", "Susan", "Mary", "Jo", "Brian"]
last_name=["Cameron", "Bender", "Neutron", "Simmons", "Jackson", "Smith", "Gardner", "Crocker","Black", "White"]
from_place=["Fort Worth","Plano","Houston","Little Rock","Detroit","Memphis", "Dallas","Arlington","Jenks","Chicago","Tulsa", "Boise", "Desmoins", "Minnieapolis", "St. Louis"]
check_list=["5647","7610","1230","3210","6543","9874","1324","3215","5897","6546","5968","6540"]
content_list=["Nice to see you!", "This is a content message", "This is another content message" ,"This is a test message to verify that the content is coming through",
"This is the content you are looking for","Content is magically here","Some content","Test content for your viewing pleasure",
"This is a test of the call_manager content system","Testing...testing...1...2...3!","Testing...testing...4...5...6!"]
##Keys for the dictionary
messages_info_keys = ["date_and_time", "caller_name", "from", "call_back_number", "call_back_ext", "check_number", "content"]
##Random pick of date from list dates
def date_picker():
picked_date=random_chooser(1,len(dates))
new_date=dates[picked_date]
return new_date
##creates a full name from lists first_name and last_name
def pick_your_name():
first=random_chooser(1,len(first_name))
last=random_chooser(1,10)
combo_name =first_name[first]+" "+last_name[last]
return combo_name
##Random pick of location from list from_place
def random_place():
picked_place=random_chooser(1,len(from_place))
place=from_place[picked_place]
return place
##Random number generator with randint from the random module
def random_number_maker(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
##combines a group of random numbers to resemble a phone number
def random_phone_number():
return "%s-%s-%s" %(str(random_number_maker(3)), str(random_number_maker(3)), str(random_number_maker(4)))
##call_back_ext picker, uses random number to generate number
def random_ext():
extension_maker=random_chooser(111,999)
return extension_maker
## not needed using random phone number generator
#call_back=[1,65,3,5,7,88]
##Random check number picker from list check_list
def check():
check_picker=random_chooser(1,10)
check=check_list[check_picker]
#=[1,2,3,5,6,8,98]
return check
##Random content picker from list content_list
def content():
content_picker=random_chooser(1,len(content_list))
content=content_list[content_picker]
return content
##Generates a random number of message items
def messages_list_random_maker():
x=0
lister_maker=[]
while(x<random_chooser(1,20)):
messages_info_values = [date_picker(),pick_your_name(),random_place(),random_phone_number(),random_ext(),check(), content()]
messages_info_list = dict(zip(messages_info_keys, messages_info_values))
lister_maker.append(messages_info_list)
x=x+1
return lister_maker
##dictionaries of info
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
messages_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
##Main area that puts everything together
doctors_list=[]
for name in doctors_name:
random_number=random.randrange(0,10)
special_notes_random_number=random.randrange(0,len(special_notes_list))
special_notes=special_notes_list[special_notes_random_number]
acct_number=random_number_maker(4)
ticket_number = abs(random_number-10)+1
duration_of_call = abs(random_number-10)+1
listerine = messages_list_random_maker()
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
doctors_list.append({"doctors_name":name, "special_notes":special_notes, "acct_number":acct_number,
"ticket_number":ticket_number, "duration_of_call":duration_of_call, "call_status": "ringing", "account_info": account_info_dict,
"messages":listerine})
##Dumps the dict to a jason object
jasoner=json.dumps(doctors_list)
#print jasoner
##Count up percentage of completion
for i in range(100):
print "\r", str(i)+"%"
time.sleep(.025)
print "\r"
##Write file
try:
open_file.write(jasoner)
process_message("SUCCESS", input_file, "Written")
except:
process_message("FAILURE" , input_file, "Not Written")
##Close file
try:
open_file.close()
process_message("SUCCESS", input_file, "Closed")
except:
process_message("FAILURE" , input_file, "Not Closed")
| return random.randrange(start,end) | identifier_body |
jsonmaking.py | import json, random, time, sys
"""
Creating a random JSON object based on lists of info and random numbers
to assign the index
"""
##Directory
input_file = "test.json"
###Success message takes the file_name and operation type (ie. written, closed)
def process_message(outcome, file_name, operation_type):
print "*******%s File: %s %s *******" % (outcome, file_name, operation_type)
##Open file
try:
open_file=open(input_file, 'w')
print "File opened"
except: | print "Error opening "+input_file
##Random chooser-random number picker function to be used over and over, but needs to be created before called
##To keep everything clean it's listed before the others funtions so that they maybe listed in order of the dictionary keys
def random_chooser(start,end):
return random.randrange(start,end)
##Lists of info
doctors_name=["Dr_K", "Dr. Pepper", "Dr. Lector", "Dr. Seus", "Dr Dre", "Dr. Phill", "Dr. Glass"]
special_notes_list=["No more doctors available for the weekend", "All offices closed for Labor Day", "Offices closed till Monday for Christmas",
"No Dr. on call Saturdays", "No Dr. on call Fridays", "No Dr. on call Mondays", "No Dr. on call Wednesdays" ,"No Dr. on call Tuesdays",
"Office closed for snow"]
dates=["1/17/2013","12/02/2011", "11/08/2012", "4/1/2010", "5/23/2011","1/15/2013","12/02/2010", "12/08/2012", "6/1/2010", "7/23/2011"]
first_name=["Bob", "Peter", "Jim", "Gerry", "Jean", "Robert", "Susan", "Mary", "Jo", "Brian"]
last_name=["Cameron", "Bender", "Neutron", "Simmons", "Jackson", "Smith", "Gardner", "Crocker","Black", "White"]
from_place=["Fort Worth","Plano","Houston","Little Rock","Detroit","Memphis", "Dallas","Arlington","Jenks","Chicago","Tulsa", "Boise", "Desmoins", "Minnieapolis", "St. Louis"]
check_list=["5647","7610","1230","3210","6543","9874","1324","3215","5897","6546","5968","6540"]
content_list=["Nice to see you!", "This is a content message", "This is another content message" ,"This is a test message to verify that the content is coming through",
"This is the content you are looking for","Content is magically here","Some content","Test content for your viewing pleasure",
"This is a test of the call_manager content system","Testing...testing...1...2...3!","Testing...testing...4...5...6!"]
##Keys for the dictionary
messages_info_keys = ["date_and_time", "caller_name", "from", "call_back_number", "call_back_ext", "check_number", "content"]
##Random pick of date from list dates
def date_picker():
picked_date=random_chooser(1,len(dates))
new_date=dates[picked_date]
return new_date
##creates a full name from lists first_name and last_name
def pick_your_name():
first=random_chooser(1,len(first_name))
last=random_chooser(1,10)
combo_name =first_name[first]+" "+last_name[last]
return combo_name
##Random pick of location from list from_place
def random_place():
picked_place=random_chooser(1,len(from_place))
place=from_place[picked_place]
return place
##Random number generator with randint from the random module
def random_number_maker(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
##combines a group of random numbers to resemble a phone number
def random_phone_number():
return "%s-%s-%s" %(str(random_number_maker(3)), str(random_number_maker(3)), str(random_number_maker(4)))
##call_back_ext picker, uses random number to generate number
def random_ext():
extension_maker=random_chooser(111,999)
return extension_maker
## not needed using random phone number generator
#call_back=[1,65,3,5,7,88]
##Random check number picker from list check_list
def check():
check_picker=random_chooser(1,10)
check=check_list[check_picker]
#=[1,2,3,5,6,8,98]
return check
##Random content picker from list content_list
def content():
content_picker=random_chooser(1,len(content_list))
content=content_list[content_picker]
return content
##Generates a random number of message items
def messages_list_random_maker():
x=0
lister_maker=[]
while(x<random_chooser(1,20)):
messages_info_values = [date_picker(),pick_your_name(),random_place(),random_phone_number(),random_ext(),check(), content()]
messages_info_list = dict(zip(messages_info_keys, messages_info_values))
lister_maker.append(messages_info_list)
x=x+1
return lister_maker
##dictionaries of info
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
messages_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
##Main area that puts everything together
doctors_list=[]
for name in doctors_name:
random_number=random.randrange(0,10)
special_notes_random_number=random.randrange(0,len(special_notes_list))
special_notes=special_notes_list[special_notes_random_number]
acct_number=random_number_maker(4)
ticket_number = abs(random_number-10)+1
duration_of_call = abs(random_number-10)+1
listerine = messages_list_random_maker()
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
doctors_list.append({"doctors_name":name, "special_notes":special_notes, "acct_number":acct_number,
"ticket_number":ticket_number, "duration_of_call":duration_of_call, "call_status": "ringing", "account_info": account_info_dict,
"messages":listerine})
##Dumps the dict to a jason object
jasoner=json.dumps(doctors_list)
#print jasoner
##Count up percentage of completion
for i in range(100):
print "\r", str(i)+"%"
time.sleep(.025)
print "\r"
##Write file
try:
open_file.write(jasoner)
process_message("SUCCESS", input_file, "Written")
except:
process_message("FAILURE" , input_file, "Not Written")
##Close file
try:
open_file.close()
process_message("SUCCESS", input_file, "Closed")
except:
process_message("FAILURE" , input_file, "Not Closed") | random_line_split |
|
jsonmaking.py | import json, random, time, sys
"""
Creating a random JSON object based on lists of info and random numbers
to assign the index
"""
##Directory
input_file = "test.json"
###Success message takes the file_name and operation type (ie. written, closed)
def process_message(outcome, file_name, operation_type):
print "*******%s File: %s %s *******" % (outcome, file_name, operation_type)
##Open file
try:
open_file=open(input_file, 'w')
print "File opened"
except:
print "Error opening "+input_file
##Random chooser-random number picker function to be used over and over, but needs to be created before called
##To keep everything clean it's listed before the others funtions so that they maybe listed in order of the dictionary keys
def random_chooser(start,end):
return random.randrange(start,end)
##Lists of info
doctors_name=["Dr_K", "Dr. Pepper", "Dr. Lector", "Dr. Seus", "Dr Dre", "Dr. Phill", "Dr. Glass"]
special_notes_list=["No more doctors available for the weekend", "All offices closed for Labor Day", "Offices closed till Monday for Christmas",
"No Dr. on call Saturdays", "No Dr. on call Fridays", "No Dr. on call Mondays", "No Dr. on call Wednesdays" ,"No Dr. on call Tuesdays",
"Office closed for snow"]
dates=["1/17/2013","12/02/2011", "11/08/2012", "4/1/2010", "5/23/2011","1/15/2013","12/02/2010", "12/08/2012", "6/1/2010", "7/23/2011"]
first_name=["Bob", "Peter", "Jim", "Gerry", "Jean", "Robert", "Susan", "Mary", "Jo", "Brian"]
last_name=["Cameron", "Bender", "Neutron", "Simmons", "Jackson", "Smith", "Gardner", "Crocker","Black", "White"]
from_place=["Fort Worth","Plano","Houston","Little Rock","Detroit","Memphis", "Dallas","Arlington","Jenks","Chicago","Tulsa", "Boise", "Desmoins", "Minnieapolis", "St. Louis"]
check_list=["5647","7610","1230","3210","6543","9874","1324","3215","5897","6546","5968","6540"]
content_list=["Nice to see you!", "This is a content message", "This is another content message" ,"This is a test message to verify that the content is coming through",
"This is the content you are looking for","Content is magically here","Some content","Test content for your viewing pleasure",
"This is a test of the call_manager content system","Testing...testing...1...2...3!","Testing...testing...4...5...6!"]
##Keys for the dictionary
messages_info_keys = ["date_and_time", "caller_name", "from", "call_back_number", "call_back_ext", "check_number", "content"]
##Random pick of date from list dates
def date_picker():
picked_date=random_chooser(1,len(dates))
new_date=dates[picked_date]
return new_date
##creates a full name from lists first_name and last_name
def pick_your_name():
first=random_chooser(1,len(first_name))
last=random_chooser(1,10)
combo_name =first_name[first]+" "+last_name[last]
return combo_name
##Random pick of location from list from_place
def random_place():
picked_place=random_chooser(1,len(from_place))
place=from_place[picked_place]
return place
##Random number generator with randint from the random module
def random_number_maker(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
##combines a group of random numbers to resemble a phone number
def random_phone_number():
return "%s-%s-%s" %(str(random_number_maker(3)), str(random_number_maker(3)), str(random_number_maker(4)))
##call_back_ext picker, uses random number to generate number
def random_ext():
extension_maker=random_chooser(111,999)
return extension_maker
## not needed using random phone number generator
#call_back=[1,65,3,5,7,88]
##Random check number picker from list check_list
def check():
check_picker=random_chooser(1,10)
check=check_list[check_picker]
#=[1,2,3,5,6,8,98]
return check
##Random content picker from list content_list
def content():
content_picker=random_chooser(1,len(content_list))
content=content_list[content_picker]
return content
##Generates a random number of message items
def messages_list_random_maker():
x=0
lister_maker=[]
while(x<random_chooser(1,20)):
messages_info_values = [date_picker(),pick_your_name(),random_place(),random_phone_number(),random_ext(),check(), content()]
messages_info_list = dict(zip(messages_info_keys, messages_info_values))
lister_maker.append(messages_info_list)
x=x+1
return lister_maker
##dictionaries of info
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
messages_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
##Main area that puts everything together
doctors_list=[]
for name in doctors_name:
random_number=random.randrange(0,10)
special_notes_random_number=random.randrange(0,len(special_notes_list))
special_notes=special_notes_list[special_notes_random_number]
acct_number=random_number_maker(4)
ticket_number = abs(random_number-10)+1
duration_of_call = abs(random_number-10)+1
listerine = messages_list_random_maker()
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
doctors_list.append({"doctors_name":name, "special_notes":special_notes, "acct_number":acct_number,
"ticket_number":ticket_number, "duration_of_call":duration_of_call, "call_status": "ringing", "account_info": account_info_dict,
"messages":listerine})
##Dumps the dict to a jason object
jasoner=json.dumps(doctors_list)
#print jasoner
##Count up percentage of completion
for i in range(100):
|
print "\r"
##Write file
try:
open_file.write(jasoner)
process_message("SUCCESS", input_file, "Written")
except:
process_message("FAILURE" , input_file, "Not Written")
##Close file
try:
open_file.close()
process_message("SUCCESS", input_file, "Closed")
except:
process_message("FAILURE" , input_file, "Not Closed")
| print "\r", str(i)+"%"
time.sleep(.025) | conditional_block |
PanoramaPreview.test.tsx | import { rest } from 'msw'
import { render, screen, waitFor } from '@testing-library/react'
import PanoramaPreview from './PanoramaPreview'
import joinUrl from '../../../../shared/utils/joinUrl'
import environment from '../../../../environment'
import { server } from '../../../../../test/server'
import { mapLayersParam, panoHeadingParam, panoPitchParam } from '../../query-params'
import { singleFixture } from '../../../../api/panorama/thumbnail'
import withMapContext from '../../../../shared/utils/withMapContext'
jest.mock('react-router-dom', () => ({
// @ts-ignore
...jest.requireActual('react-router-dom'),
useLocation: () => ({
pathname: '/data',
search: '?someOtherParam=1&lagen=random-layer',
}),
}))
describe('PanoramaPreview', () => {
it('should build a link including current parameters, panorama parameters and layers for panorama', async () => {
const panoramaThumbnailUrl = joinUrl([environment.API_ROOT, 'panorama/thumbnail'])
server.use(
rest.get(panoramaThumbnailUrl, async (req, res, ctx) => res(ctx.json(singleFixture))),
)
render(withMapContext(<PanoramaPreview location={{ lat: 1, lng: 2 }} />))
await waitFor(() => {
const link = screen.queryByTestId('panoramaPreview')?.querySelector('a')
const params = new URLSearchParams(link?.search) | expect(params.get('someOtherParam')).toContain('1')
expect(params.get(mapLayersParam.name)).toContain('random-layer')
expect(params.get(panoPitchParam.name)).toContain('0')
expect(params.get(panoHeadingParam.name)).toContain('131')
})
})
it('should render PanoAlert when API responses with a 403 Forbidden', async () => {
const panoramaThumbnailUrl = joinUrl([environment.API_ROOT, 'panorama/thumbnail/'])
server.use(
rest.get(panoramaThumbnailUrl, async (req, res, ctx) => {
return res(ctx.status(403))
}),
)
render(withMapContext(<PanoramaPreview location={{ lat: 1, lng: 2 }} />))
const panoAlert = await screen.findByTestId('panoAlert')
expect(panoAlert).toBeInTheDocument()
})
it('should not render the panorama preview if panoramaviewer is active', () => {
render(withMapContext(<PanoramaPreview location={{ lat: 1, lng: 2 }} />, { panoActive: true }))
expect(screen.queryByTestId('panoramaPreview')).not.toBeInTheDocument()
})
}) | random_line_split |
|
grad_modifiers.py | from __future__ import absolute_import
import numpy as np
from keras import backend as K
from .utils import utils
def negate(grads):
"""Negates the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The negated gradients.
"""
return -grads
def absolute(grads):
"""Computes absolute gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The absolute gradients.
"""
return np.abs(grads)
def invert(grads):
"""Inverts the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The inverted gradients.
"""
return 1. / (grads + K.epsilon())
def relu(grads):
"""Clips negative gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The rectified gradients.
"""
grads[grads < 0.] = 0.
return grads
def small_values(grads):
"""Can be used to highlight small gradient values.
Args:
grads: A numpy array of grads to use.
Returns: | The modified gradients that highlight small values.
"""
return absolute(invert(grads))
def get(identifier):
return utils.get_identifier(identifier, globals(), __name__) | random_line_split |
|
grad_modifiers.py | from __future__ import absolute_import
import numpy as np
from keras import backend as K
from .utils import utils
def negate(grads):
"""Negates the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The negated gradients.
"""
return -grads
def absolute(grads):
"""Computes absolute gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The absolute gradients.
"""
return np.abs(grads)
def invert(grads):
"""Inverts the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The inverted gradients.
"""
return 1. / (grads + K.epsilon())
def relu(grads):
"""Clips negative gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The rectified gradients.
"""
grads[grads < 0.] = 0.
return grads
def small_values(grads):
"""Can be used to highlight small gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The modified gradients that highlight small values.
"""
return absolute(invert(grads))
def | (identifier):
return utils.get_identifier(identifier, globals(), __name__)
| get | identifier_name |
grad_modifiers.py | from __future__ import absolute_import
import numpy as np
from keras import backend as K
from .utils import utils
def negate(grads):
"""Negates the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The negated gradients.
"""
return -grads
def absolute(grads):
"""Computes absolute gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The absolute gradients.
"""
return np.abs(grads)
def invert(grads):
"""Inverts the gradients.
Args:
grads: A numpy array of grads to use.
Returns:
The inverted gradients.
"""
return 1. / (grads + K.epsilon())
def relu(grads):
"""Clips negative gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The rectified gradients.
"""
grads[grads < 0.] = 0.
return grads
def small_values(grads):
"""Can be used to highlight small gradient values.
Args:
grads: A numpy array of grads to use.
Returns:
The modified gradients that highlight small values.
"""
return absolute(invert(grads))
def get(identifier):
| return utils.get_identifier(identifier, globals(), __name__) | identifier_body |
|
hero-search.component.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';
import { HeroSearchService } from './hero-search.service';
import { Hero } from './hero';
@Component({
moduleId: module.id,
selector: 'hero-search',
templateUrl: 'hero-search.component.html',
styleUrls: ['hero-search.component.css'],
providers: [HeroSearchService]
})
export class HeroSearchComponent implements OnInit {
heroes: Observable<Hero[]>;
private searchTerms = new Subject<string>();
| (
private heroSearchService: HeroSearchService,
private router: Router
) { }
// Push a search term into the observable stream.
search(term: string): void {
this.searchTerms.next(term);
}
ngOnInit(): void {
this.heroes = this.searchTerms
.debounceTime(300) // wait for 300ms pause in events
.distinctUntilChanged() // ignore if next search term is same as previous
.switchMap(term => term // switch to new observable each time
// return the http search observable
? this.heroSearchService.search(term)
// or the observable of empty heroes if no search term
: Observable.of<Hero[]>([]))
.catch(error => {
// TODO: real error handling
console.log(error);
return Observable.of<Hero[]>([]);
});
}
gotoDetail(hero: Hero): void {
let link = ['/detail', hero.id];
this.router.navigate(link);
}
}
| constructor | identifier_name |
hero-search.component.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';
import { HeroSearchService } from './hero-search.service';
import { Hero } from './hero';
@Component({
moduleId: module.id,
selector: 'hero-search',
templateUrl: 'hero-search.component.html',
styleUrls: ['hero-search.component.css'],
providers: [HeroSearchService]
})
export class HeroSearchComponent implements OnInit {
heroes: Observable<Hero[]>;
private searchTerms = new Subject<string>();
constructor(
private heroSearchService: HeroSearchService,
private router: Router
) { }
// Push a search term into the observable stream.
search(term: string): void {
this.searchTerms.next(term);
}
ngOnInit(): void {
this.heroes = this.searchTerms
.debounceTime(300) // wait for 300ms pause in events
.distinctUntilChanged() // ignore if next search term is same as previous
.switchMap(term => term // switch to new observable each time
// return the http search observable
? this.heroSearchService.search(term)
// or the observable of empty heroes if no search term
: Observable.of<Hero[]>([]))
.catch(error => {
// TODO: real error handling
console.log(error);
return Observable.of<Hero[]>([]);
});
}
gotoDetail(hero: Hero): void {
let link = ['/detail', hero.id]; | }
} | this.router.navigate(link); | random_line_split |
lights.py |
import logging
import requests
HUE_IP = '192.168.86.32'
HUE_USERNAME = '7KcxItfntdF0DuWV9t0GPMeToEBlvHTgqWNZqxu6'
logger = logging.getLogger('hue')
def getLights():
url = 'http://{0}/api/{1}/lights'.format(HUE_IP, HUE_USERNAME)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for all lights')
return
if r.status_code == 200:
|
def getStatus(id):
url = 'http://{0}/api/{1}/lights/{2}'.format(HUE_IP, HUE_USERNAME, id)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for light {0}'.format (id))
return
if r.status_code == 200:
data = r.json()
return data
| data = r.json()
return data | conditional_block |
lights.py | import logging
import requests
HUE_IP = '192.168.86.32'
HUE_USERNAME = '7KcxItfntdF0DuWV9t0GPMeToEBlvHTgqWNZqxu6'
|
url = 'http://{0}/api/{1}/lights'.format(HUE_IP, HUE_USERNAME)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for all lights')
return
if r.status_code == 200:
data = r.json()
return data
def getStatus(id):
url = 'http://{0}/api/{1}/lights/{2}'.format(HUE_IP, HUE_USERNAME, id)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for light {0}'.format (id))
return
if r.status_code == 200:
data = r.json()
return data | logger = logging.getLogger('hue')
def getLights(): | random_line_split |
lights.py |
import logging
import requests
HUE_IP = '192.168.86.32'
HUE_USERNAME = '7KcxItfntdF0DuWV9t0GPMeToEBlvHTgqWNZqxu6'
logger = logging.getLogger('hue')
def getLights():
url = 'http://{0}/api/{1}/lights'.format(HUE_IP, HUE_USERNAME)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for all lights')
return
if r.status_code == 200:
data = r.json()
return data
def | (id):
url = 'http://{0}/api/{1}/lights/{2}'.format(HUE_IP, HUE_USERNAME, id)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for light {0}'.format (id))
return
if r.status_code == 200:
data = r.json()
return data
| getStatus | identifier_name |
lights.py |
import logging
import requests
HUE_IP = '192.168.86.32'
HUE_USERNAME = '7KcxItfntdF0DuWV9t0GPMeToEBlvHTgqWNZqxu6'
logger = logging.getLogger('hue')
def getLights():
|
def getStatus(id):
url = 'http://{0}/api/{1}/lights/{2}'.format(HUE_IP, HUE_USERNAME, id)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for light {0}'.format (id))
return
if r.status_code == 200:
data = r.json()
return data
| url = 'http://{0}/api/{1}/lights'.format(HUE_IP, HUE_USERNAME)
try:
r = requests.get(url)
except:
logger.error('Failed getting status for all lights')
return
if r.status_code == 200:
data = r.json()
return data | identifier_body |
SetContainsAllCodec.ts | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable max-len */
import {BitsUtil} from '../util/BitsUtil';
import {FixSizedTypesCodec} from './builtin/FixSizedTypesCodec';
import {ClientMessage, Frame, RESPONSE_BACKUP_ACKS_OFFSET, PARTITION_ID_OFFSET} from '../protocol/ClientMessage';
import {StringCodec} from './builtin/StringCodec';
import {Data} from '../serialization/Data';
import {ListMultiFrameCodec} from './builtin/ListMultiFrameCodec';
import {DataCodec} from './builtin/DataCodec';
// hex: 0x060300
const REQUEST_MESSAGE_TYPE = 393984;
// hex: 0x060301
// RESPONSE_MESSAGE_TYPE = 393985
const REQUEST_INITIAL_FRAME_SIZE = PARTITION_ID_OFFSET + BitsUtil.INT_SIZE_IN_BYTES;
const RESPONSE_RESPONSE_OFFSET = RESPONSE_BACKUP_ACKS_OFFSET + BitsUtil.BYTE_SIZE_IN_BYTES;
/** @internal */
export class SetContainsAllCodec {
static encodeRequest(name: string, items: Data[]): ClientMessage {
const clientMessage = ClientMessage.createForEncode();
clientMessage.setRetryable(false);
const initialFrame = Frame.createInitialFrame(REQUEST_INITIAL_FRAME_SIZE);
clientMessage.addFrame(initialFrame);
clientMessage.setMessageType(REQUEST_MESSAGE_TYPE);
clientMessage.setPartitionId(-1);
StringCodec.encode(clientMessage, name);
ListMultiFrameCodec.encode(clientMessage, items, DataCodec.encode);
return clientMessage;
}
static | (clientMessage: ClientMessage): boolean {
const initialFrame = clientMessage.nextFrame();
return FixSizedTypesCodec.decodeBoolean(initialFrame.content, RESPONSE_RESPONSE_OFFSET);
}
}
| decodeResponse | identifier_name |
SetContainsAllCodec.ts | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable max-len */
import {BitsUtil} from '../util/BitsUtil';
import {FixSizedTypesCodec} from './builtin/FixSizedTypesCodec';
import {ClientMessage, Frame, RESPONSE_BACKUP_ACKS_OFFSET, PARTITION_ID_OFFSET} from '../protocol/ClientMessage';
import {StringCodec} from './builtin/StringCodec';
import {Data} from '../serialization/Data';
import {ListMultiFrameCodec} from './builtin/ListMultiFrameCodec';
import {DataCodec} from './builtin/DataCodec';
// hex: 0x060300
const REQUEST_MESSAGE_TYPE = 393984;
// hex: 0x060301
// RESPONSE_MESSAGE_TYPE = 393985
const REQUEST_INITIAL_FRAME_SIZE = PARTITION_ID_OFFSET + BitsUtil.INT_SIZE_IN_BYTES;
const RESPONSE_RESPONSE_OFFSET = RESPONSE_BACKUP_ACKS_OFFSET + BitsUtil.BYTE_SIZE_IN_BYTES;
/** @internal */
export class SetContainsAllCodec {
static encodeRequest(name: string, items: Data[]): ClientMessage {
const clientMessage = ClientMessage.createForEncode();
clientMessage.setRetryable(false);
const initialFrame = Frame.createInitialFrame(REQUEST_INITIAL_FRAME_SIZE);
clientMessage.addFrame(initialFrame);
clientMessage.setMessageType(REQUEST_MESSAGE_TYPE);
clientMessage.setPartitionId(-1);
StringCodec.encode(clientMessage, name);
ListMultiFrameCodec.encode(clientMessage, items, DataCodec.encode);
return clientMessage;
}
static decodeResponse(clientMessage: ClientMessage): boolean {
const initialFrame = clientMessage.nextFrame();
return FixSizedTypesCodec.decodeBoolean(initialFrame.content, RESPONSE_RESPONSE_OFFSET);
}
} | * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
output_disposition.py | '''
Created on Sep 14, 2013
@author: paepcke
Modifications:
- Jan 1, 2013: added remove() method to OutputFile
'''
import StringIO
from collections import OrderedDict
import csv
import re
import sys
import os
import tempfile
from col_data_type import ColDataType
class OutputDisposition(object):
'''
Specifications for where completed relation rows
should be deposited, and in which format. Current
output options are to files, and to stdout.
This class is abstract, but make sure the subclasses
invoke this super's __init__() when they are initialized.
Also defined here are available output formats, of
which there are two: CSV, and SQL insert statements AND
CSV.
NOTE: currently the CSV-only format option is broken. Not
enough time to maintain it.
SQL insert statements that are directed to files will also
generate equivalent .csv files. The insert statement files
will look like the result of a mysqldump, and inserts into
different tables are mixed. The corresponding (values-only)
csv files are split: one file for each table.
'''
def __init__(self, outputFormat, outputDestObj=None):
'''
:param outputDestObj: instance of one of the subclasses
:type outputDestObj: Subclass(OutputDisposition)
'''
self.outputFormat = outputFormat
if outputDestObj is None:
self.outputDest = self
else:
self.outputDest = outputDestObj
self.csvTableFiles = {}
self.schemas = TableSchemas()
def __enter__(self):
return self.outputDest
def __exit__(self,excType, excValue, excTraceback):
try:
self.outputDest.close()
except:
# If the conversion itself went fine, then
# raise this exception from the closing attempt.
# But if the conversion failed, then have the
# system re-raise that earlier exception:
if excValue is None:
raise IOError("Could not close the output of the conversion: %s" % sys.exc_info()[0])
# Return False to indicate that if the conversion
# threw an error, the exception should now be re-raised.
# If the conversion worked fine, then this return value
# is ignored.
return False
def flush(self):
self.outputDest.flush()
def getOutputFormat(self):
return self.outputFormat
def addSchemaHints(self, tableName, schemaHints):
'''
Provide a schema hint dict for the table of the given name.
:param tableName: name of table to which schema applies. The name may be None, in which case it refers to the main (default) table.
:type tableName: String
:param schemaHints: dict mapping column names to SQL types via ColumnSpec instances
:type schemaHints: [ordered]Dict<String,ColumnSpec>
'''
self.schemas.addColSpecs(tableName, schemaHints)
def getSchemaHint(self, colName, tableName):
'''
Given a column name, and a table name, return the ColumnSpec object
that describes that column. If tableName is None, the main (default)
table's schema will be searched for a colName entry
:param colName: name of column whose schema info is sought
:type colName: String
:param tableName: name of table in which the given column resides
:type tableName: String
:return: list of ColumnSpec instances
:rtype: (ColumnSpec)
@raise KeyError: if table or column are not found
'''
return self.schemas[tableName][colName]
def getSchemaHintByPos(self, pos, tableName):
try:
return self.schemas[tableName].values()[pos]
except ValueError:
return None
except IndexError:
raise ValueError("Attempt to access pos %s in schema for table %s, which is shorter than %s: %s") %\
(str(pos), tableName, str(pos), self.schemas[tableName].values())
def getSchema(self, tableName):
try:
return self.schemas[tableName].values()
except ValueError:
return None
def copySchemas(self, destDisposition):
'''
Given another instance of OutputDisposition,
copy this instance's schemas to the destination.
:param destDisposition: another instance of OutputDisposition
:type destDisposition: OutputDisposition
'''
destDisposition.schemas = self.schemas
def ensureColExistence(self, colName, colDataType, jsonToRelationConverter, tableName=None):
'''
Given a column name and MySQL datatype name, check whether this
column has previously been encountered. If not, a column information
object is created, which will eventually be used to create the column
header, or SQL alter statements.
:param colName: name of the column to consider
:type colName: String
:param colDataType: datatype of the column.
:type colDataType: ColDataType
:param tableName: name of table to which the column is to belong; None if for main table
:type tableName: {String | None}
'''
schemaDict = self.schemas[tableName]
if schemaDict is None or len(schemaDict) == 0:
# schema for this table definitely does not have the column:
colSpecObj = ColumnSpec( colName, colDataType, jsonToRelationConverter)
self.schemas[tableName] = OrderedDict({colName : colSpecObj})
return
# Have schema (dict) for this table. Does that dict contain
# an entry for the col name?
try:
schemaDict[colName]
# all set:
return
except KeyError:
colSpecObj = ColumnSpec( colName, colDataType, jsonToRelationConverter)
schemaDict[colName] = colSpecObj
def createTmpTableFile(self, tableName, fileSuffix):
'''
Used for cases in which parsers must create more than one
table. Those tables need to be written to disk, even when
output of the main table is piped.
:param tableName: name by which the table file obj can be retrieved
:type tableName: String
:param fileSuffix: suffix for temp file name. Ex. 'csv' for CSV outputs, or 'sql' for SQL dumps
:type fileSuffix: String
:return: file object open for writing
:rtype: File
'''
self.csvTableFiles[tableName] = tempfile.NamedTemporaryFile(prefix='tmpTable',
suffix=fileSuffix)
return self.csvTableFiles[tableName]
#--------------------- Available Output Formats
class OutputFormat():
CSV = 0
SQL_INSERT_STATEMENTS = 1
SQL_INSERTS_AND_CSV = 2
#--------------------- Available Output Destination Options:
class OutputPipe(OutputDisposition):
def __init__(self, outputFormat):
super(OutputPipe, self).__init__(outputFormat)
self.fileHandle = sys.stdout
# Make file name accessible as property just like
# Python file objects do:
self.name = "<stdout>" # @UnusedVariable
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
self.tableCSVWriters = {}
def | (self):
pass # don't close stdout
def flush(self):
sys.stdout.flush()
def __str__(self):
return "<OutputPipe:<stdout>"
def writerow(self, colElementArray, tableName=None):
# For CSV: make sure everything is a string:
if self.outputFormat == OutputDisposition.OutputFormat.CSV:
row = map(str,colElementArray)
if tableName is None:
self.csvWriter.writerow(row)
else:
self.tableCSVWriters[tableName].writerow(row)
else:
print(colElementArray)
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table.
:param schemaHintsNewTable:
:type schemaHintsNewTable:
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
tmpTableFile = self.createTmpTableFile(tableName, 'csv')
self.tableCSVWriters[tableName] = csv.writer(tmpTableFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
:param whatToWrite:
:type whatToWrite:
'''
sys.stdout.write(whatToWrite)
sys.stdout.flush()
def getCSVTableOutFileName(self, tableName):
return self.name
class OutputFile(OutputDisposition):
# When looking at INSERT INTO tableName (...,
# grab 'tableName':
TABLE_NAME_PATTERN = re.compile(r'[^\s]*\s[^\s]*\s([^\s]*)\s')
# When looking at:" ('7a286e24_b578_4741_b6e0_c0e8596bd456','Mozil...);\n"
# grab everything inside the parens, including the trailing ');\n', which
# we'll cut out in the code:
VALUES_PATTERN = re.compile(r'^[\s]{4}\(([^\n]*)\n{0,1}')
def __init__(self, fileName, outputFormat, options='ab'):
'''
Create instance of an output file destination for converted log files.
Such an instance is created both for OutputFormat.SQL_INSERT_STATEMENTS and
for OutputFormat.CSV. In the Insert statements case the fileName is the file
where all INSERT statements are placed; i.e. the entire dump. If the output format
is CSV, then the fileName is a prefix for the file names of each generated CSV file
(one file for each table).
:param fileName: fully qualified name of output file for CSV (in case of CSV-only),
or MySQL INSERT statement dump
:type fileName: String
:param outputFormat: whether to output CSV or MySQL INSERT statements
:type outputFormat: OutputDisposition.OutputFormat
:param options: output file options as per Python built-in 'open()'. Defaults to append/binary. The
latter for compatibility with Windows
:type options: String
'''
super(OutputFile, self).__init__(outputFormat)
# Make file name accessible as property just like
# Python file objects do:
self.name = fileName # @UnusedVariable
self.outputFormat = outputFormat
# Open the output file as 'append' and 'binary'
# The latter is needed for Windows.
self.fileHandle = open(fileName, options)
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
if outputFormat == OutputDisposition.OutputFormat.CSV or\
outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
# Prepare for CSV files needed for the tables:
self.tableCSVWriters = {}
def close(self):
self.fileHandle.close()
# Also close any CSV out files that might exist:
try:
for csvFD in self.csvTableFiles.values():
csvFD.close()
except:
pass
def flush(self):
self.fileHandle.flush()
for csvFD in self.tableCSVWriters.values():
try:
csvFD.flush()
except:
pass
def remove(self):
try:
os.remove(self.fileHandle.name)
except:
pass
def __str__(self):
return "<OutputFile:%s>" % self.getFileName()
def getFileName(self, tableName=None):
'''
Get file name of a MySQL INSERT statement outfile,
or, given a table name, the name of the outfile
for CSV destined to the given table.
:param tableName:
:type tableName:
'''
if tableName is None:
return self.name
else:
fd = self.csvTableFiles.get(tableName, None)
if fd is None:
return None
return fd.name
def writerow(self, colElementArray, tableName=None):
'''
How I wish Python had parameter type based polymorphism. Life
would be so much cleaner.
ColElementArray is either an array of values (coming from
a CSV-only parser), or a string that contains a complete
MySQL INSERT statement (from MySQL dump-creating parsers).
In the first case, we ensure all elements in the array are
strings, and write to output. In the latter case we write
the INSERT statements to their output file. Then, if output
format is SQL_INSERTS_AND_CSV, we also extract the MySQL
values and write them to the proper CSV file.
:param colElementArray: either a MySQL INSERT statement, or an array of values
:type colElementArray: {String | [string]}
:param tableName: name of table to which output is destined. Only needed for
value arrays from CSV-only parsers. Their value arrays don't contain
info on the destination table. INSERT statements do contain the destination table
name.
:type tableName: String
'''
if isinstance(colElementArray, list):
# Simple CSV array of values;
# make sure every array element is a string:
row = map(str,colElementArray)
if tableName is None:
# The main (and maybe only) table:
self.csvWriter.writerow(row)
else:
# One of the other tables for which files
# were opened during calls to startNewTable():
self.tableCSVWriters[tableName].writerow(row)
else:
# We are either outputting INSERT statements, or
# both those and CSV, or just CSV derived from a
# full MySQL INSERT parser, like edxTrackLogJSONParser.
# Start with the INSERTS:
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.fileHandle.write(colElementArray + '\n')
# If we are outputting either CSV or INSERTs and CSV, do the CSV
# part now:
if self.outputFormat != OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
# Strip the CSV parts out from the INSERT statement, which may
# contain multiple VALUE statements:
self.writeCSVRowsFromInsertStatement(colElementArray)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
:param whatToWrite:
:type whatToWrite:
'''
self.fileHandle.write(whatToWrite)
self.fileHandle.flush()
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table (in case of CSV-Only), or any table
in case of SQLInsert+CSV.
:param tableName: name of new table
:type tableName: string
:param schemaHintsNewTable: map column name to column SQL type
:type schemaHintsNewTable: {String,ColDataType}
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
return
# We are producing CSV (possibly in addition to Inserts):
try:
# Already have a table writer for this table?
self.tableCSVWriters[tableName]
return # yep
except KeyError:
# OK, really is a new table caller is starting:
pass
# Ensure that we have an open FD to write to for this table:
if self.outputFormat == OutputDisposition.OutputFormat.CSV or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.ensureOpenCSVOutFileFromTableName(tableName)
def ensureOpenCSVOutFileFromTableName(self, tableName):
'''
Checks whether an open File object exists for the given
table. If not, creates one. Returns the FD. The output
file is created in the same directory as self.out
:param tableName: name of table whose CSV output file we are to check for, or create
:type tableName: String
:return: a File object open for writing/appending
:rtype: File
'''
try:
# If we already have an FD for this table, return:
return self.tableCSVWriters[tableName]
except KeyError:
# Else create one below:
pass
outFileName = self.getFileName()
if outFileName == '/dev/null':
outFile = open('/dev/null', 'ab')
self.csvTableFiles[tableName] = outFile
return outFile
csvOutFileName = self.getCSVTableOutFileName(tableName)
outFile = open(csvOutFileName, 'w')
self.csvTableFiles[tableName] = outFile
self.tableCSVWriters[tableName] = csv.writer(outFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
return self.tableCSVWriters[tableName]
def getCSVTableOutFileName(self, tableName):
# The 'None' below ensures that we get the
# main file's name back:
return "%s_%sTable.csv" % (self.getFileName(None), tableName)
def writeCSVRowsFromInsertStatement(self, insertStatement):
'''
Takes one SQL INSERT INTO Statement, possibly including multiple VALUES
lines. Extracts the destination table and the values list(s), and writes
them to disk via the appropriate CSVWriter. The INSERT statements are
expected to be very regular, generated by json_to_relation. Don't use
this method for arbitrary INSERT statements, b/c it relies on regular
expressions that expect the specific format. Prerequisite: self.tableCSVWriters
is a dictionary that maps table names into File objects that are open
for writing.
:param insertStatement: Well-formed MySQL INSERT statement
:type insertStatement: String
@raise ValueError: if table name could not be extracted from the
INSERT statement, or if the insertStatement contains no VALUES
clause.
'''
inFD = StringIO.StringIO(insertStatement)
try:
firstLine = inFD.readline()
# Pick out the name of the table to which CSV is to be added:
tblNameMatch = OutputFile.TABLE_NAME_PATTERN.search(firstLine)
if tblNameMatch is None:
raise ValueError('No match when trying to extract table name from "%s"' % insertStatement)
tblName = tblNameMatch.group(1)
except IndexError:
raise ValueError('Could not extract table name from "%s"' % insertStatement)
readAllValueTuples = False
while not readAllValueTuples:
# Get values list that belongs to this insert statement:
valuesLine = inFD.readline()
if not valuesLine.startswith(' ('):
readAllValueTuples = True
continue
# Extract the comma-separated values list out from the parens;
# first get "'fasdrew_fdsaf...',...);\n":
oneValuesLineMatch = OutputFile.VALUES_PATTERN.search(valuesLine)
if oneValuesLineMatch is None:
# Hopefully never happens:
raise ValueError('No match for values line "%s"' % insertStatement)
# Get just the comma-separated values list from
# 'abfd_sfd,...);\n
valuesList = oneValuesLineMatch.group(1)[:-2] + '\n'
# Make sure we've seen additions to this table before or,
# if not, have a CSV writer and a file created to receive
# the CSV lines:
self.ensureOpenCSVOutFileFromTableName(tblName)
theOutFd = self.csvTableFiles[tblName]
theOutFd.write(valuesList)
class ColumnSpec(object):
'''
Housekeeping class. Each instance represents the name,
position, and datatype of one column. These instances are
used to generate column name headers, and
SQL insert statements.
'''
def __init__(self, colName, colDataType, jsonToRelationProcessor):
'''
Create a ColumnSpec instance.
:param colName: name of column
:type colName: String
:param colDataType: data type of column (an enum)
:type colDataType: ColumnSpec
:param jsonToRelationProcessor: associated JSON to relation JSONToRelation instance
:type jsonToRelationProcessor: JSONToRelation
'''
self.colName = colName
self.colDataType = colDataType
self.colPos = jsonToRelationProcessor.getNextNewColPos()
jsonToRelationProcessor.bumpNextNewColPos()
def getDefaultValue(self):
return ColDataType().defaultValues[self.colDataType]
def getName(self):
'''
Return column name
:return: name of column
:rtype: String
'''
return self.colName
def getType(self):
'''
Return SQL type
:return: SQL type of colum in upper case
:rtype: String
'''
return ColDataType().toString(self.colDataType).upper()
def getSQLDefSnippet(self):
'''
Return string snippet to use in SQL CREATE TABLE or ALTER TABLE
statement
'''
return " %s %s" % (self.getName(), self.getType())
def __str__(self):
return "<Col %s: %s (position %s)>" % (self.colName,
self.getType(),
self.colPos)
def __repr__(self):
return self.__str__()
class TableSchemas(object):
'''
Repository for the schemas of all tables. A schema is an
array ColumnSpec instances. Each such list is associated with
one relational table. A class var dict holds the schemas for
all tables.
'''
def __init__(self):
self.allSchemas = OrderedDict()
# Add empty schema for main (default) table:
self.allSchemas[None] = OrderedDict()
def __getitem__(self, tableName):
return self.allSchemas[tableName]
def __setitem__(self, tableName, colSpecsDict):
self.allSchemas[tableName] = colSpecsDict
def keys(self):
return self.allSchemas.keys()
def addColSpec(self, tableName, colSpec):
try:
schema = self.allSchemas[tableName]
except KeyError:
self.allSchemas[tableName] = {colSpec.getName() : colSpec}
schema = self.allSchemas[tableName]
schema[colSpec.getName()] = colSpec
def addColSpecs(self, tableName, colSpecsDict):
if not isinstance(colSpecsDict, OrderedDict):
raise ValueError("ColumSpec parameter must be a dictionary<ColName,ColumnSpec>")
try:
schema = self.allSchemas[tableName]
except KeyError:
self.allSchemas[tableName] = colSpecsDict
schema = self.allSchemas[tableName]
# Change schema to include the new dict:
schema.update(colSpecsDict)
| close | identifier_name |
output_disposition.py | '''
Created on Sep 14, 2013
@author: paepcke
Modifications:
- Jan 1, 2013: added remove() method to OutputFile
'''
import StringIO
from collections import OrderedDict
import csv
import re
import sys
import os
import tempfile
from col_data_type import ColDataType
class OutputDisposition(object):
'''
Specifications for where completed relation rows
should be deposited, and in which format. Current
output options are to files, and to stdout.
This class is abstract, but make sure the subclasses
invoke this super's __init__() when they are initialized.
Also defined here are available output formats, of
which there are two: CSV, and SQL insert statements AND
CSV.
NOTE: currently the CSV-only format option is broken. Not
enough time to maintain it.
SQL insert statements that are directed to files will also
generate equivalent .csv files. The insert statement files
will look like the result of a mysqldump, and inserts into
different tables are mixed. The corresponding (values-only)
csv files are split: one file for each table.
'''
def __init__(self, outputFormat, outputDestObj=None):
'''
:param outputDestObj: instance of one of the subclasses
:type outputDestObj: Subclass(OutputDisposition)
'''
self.outputFormat = outputFormat
if outputDestObj is None:
self.outputDest = self
else:
self.outputDest = outputDestObj
self.csvTableFiles = {}
self.schemas = TableSchemas()
def __enter__(self):
return self.outputDest
def __exit__(self,excType, excValue, excTraceback):
try:
self.outputDest.close()
except:
# If the conversion itself went fine, then
# raise this exception from the closing attempt.
# But if the conversion failed, then have the
# system re-raise that earlier exception:
if excValue is None:
raise IOError("Could not close the output of the conversion: %s" % sys.exc_info()[0])
# Return False to indicate that if the conversion
# threw an error, the exception should now be re-raised.
# If the conversion worked fine, then this return value
# is ignored.
return False
def flush(self):
self.outputDest.flush()
def getOutputFormat(self):
return self.outputFormat
def addSchemaHints(self, tableName, schemaHints):
'''
Provide a schema hint dict for the table of the given name.
:param tableName: name of table to which schema applies. The name may be None, in which case it refers to the main (default) table.
:type tableName: String
:param schemaHints: dict mapping column names to SQL types via ColumnSpec instances
:type schemaHints: [ordered]Dict<String,ColumnSpec>
'''
self.schemas.addColSpecs(tableName, schemaHints)
def getSchemaHint(self, colName, tableName):
'''
Given a column name, and a table name, return the ColumnSpec object
that describes that column. If tableName is None, the main (default)
table's schema will be searched for a colName entry
:param colName: name of column whose schema info is sought
:type colName: String
:param tableName: name of table in which the given column resides
:type tableName: String
:return: list of ColumnSpec instances
:rtype: (ColumnSpec)
@raise KeyError: if table or column are not found
'''
return self.schemas[tableName][colName]
def getSchemaHintByPos(self, pos, tableName):
try:
return self.schemas[tableName].values()[pos]
except ValueError:
return None
except IndexError:
raise ValueError("Attempt to access pos %s in schema for table %s, which is shorter than %s: %s") %\
(str(pos), tableName, str(pos), self.schemas[tableName].values())
def getSchema(self, tableName):
try:
return self.schemas[tableName].values()
except ValueError:
return None
def copySchemas(self, destDisposition):
'''
Given another instance of OutputDisposition,
copy this instance's schemas to the destination.
:param destDisposition: another instance of OutputDisposition
:type destDisposition: OutputDisposition
'''
destDisposition.schemas = self.schemas
def ensureColExistence(self, colName, colDataType, jsonToRelationConverter, tableName=None):
'''
Given a column name and MySQL datatype name, check whether this
column has previously been encountered. If not, a column information
object is created, which will eventually be used to create the column
header, or SQL alter statements.
:param colName: name of the column to consider
:type colName: String
:param colDataType: datatype of the column.
:type colDataType: ColDataType
:param tableName: name of table to which the column is to belong; None if for main table
:type tableName: {String | None}
'''
schemaDict = self.schemas[tableName]
if schemaDict is None or len(schemaDict) == 0:
# schema for this table definitely does not have the column:
colSpecObj = ColumnSpec( colName, colDataType, jsonToRelationConverter)
self.schemas[tableName] = OrderedDict({colName : colSpecObj})
return
# Have schema (dict) for this table. Does that dict contain
# an entry for the col name?
try:
schemaDict[colName]
# all set:
return
except KeyError:
colSpecObj = ColumnSpec( colName, colDataType, jsonToRelationConverter)
schemaDict[colName] = colSpecObj
def createTmpTableFile(self, tableName, fileSuffix):
'''
Used for cases in which parsers must create more than one
table. Those tables need to be written to disk, even when
output of the main table is piped.
:param tableName: name by which the table file obj can be retrieved
:type tableName: String
:param fileSuffix: suffix for temp file name. Ex. 'csv' for CSV outputs, or 'sql' for SQL dumps
:type fileSuffix: String
:return: file object open for writing
:rtype: File
'''
self.csvTableFiles[tableName] = tempfile.NamedTemporaryFile(prefix='tmpTable',
suffix=fileSuffix)
return self.csvTableFiles[tableName]
#--------------------- Available Output Formats
class OutputFormat():
CSV = 0
SQL_INSERT_STATEMENTS = 1
SQL_INSERTS_AND_CSV = 2
#--------------------- Available Output Destination Options:
class OutputPipe(OutputDisposition):
def __init__(self, outputFormat):
super(OutputPipe, self).__init__(outputFormat)
self.fileHandle = sys.stdout
# Make file name accessible as property just like
# Python file objects do:
self.name = "<stdout>" # @UnusedVariable
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
self.tableCSVWriters = {}
def close(self):
pass # don't close stdout
def flush(self):
sys.stdout.flush()
def __str__(self):
return "<OutputPipe:<stdout>"
def writerow(self, colElementArray, tableName=None):
# For CSV: make sure everything is a string:
if self.outputFormat == OutputDisposition.OutputFormat.CSV:
row = map(str,colElementArray)
if tableName is None:
self.csvWriter.writerow(row)
else:
self.tableCSVWriters[tableName].writerow(row)
else:
print(colElementArray)
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table.
:param schemaHintsNewTable:
:type schemaHintsNewTable:
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
tmpTableFile = self.createTmpTableFile(tableName, 'csv')
self.tableCSVWriters[tableName] = csv.writer(tmpTableFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
:param whatToWrite:
:type whatToWrite:
'''
sys.stdout.write(whatToWrite)
sys.stdout.flush()
def getCSVTableOutFileName(self, tableName):
return self.name
class OutputFile(OutputDisposition):
# When looking at INSERT INTO tableName (...,
# grab 'tableName':
TABLE_NAME_PATTERN = re.compile(r'[^\s]*\s[^\s]*\s([^\s]*)\s')
# When looking at:" ('7a286e24_b578_4741_b6e0_c0e8596bd456','Mozil...);\n"
# grab everything inside the parens, including the trailing ');\n', which
# we'll cut out in the code:
VALUES_PATTERN = re.compile(r'^[\s]{4}\(([^\n]*)\n{0,1}')
def __init__(self, fileName, outputFormat, options='ab'):
'''
Create instance of an output file destination for converted log files.
Such an instance is created both for OutputFormat.SQL_INSERT_STATEMENTS and
for OutputFormat.CSV. In the Insert statements case the fileName is the file
where all INSERT statements are placed; i.e. the entire dump. If the output format
is CSV, then the fileName is a prefix for the file names of each generated CSV file
(one file for each table).
:param fileName: fully qualified name of output file for CSV (in case of CSV-only),
or MySQL INSERT statement dump
:type fileName: String
:param outputFormat: whether to output CSV or MySQL INSERT statements
:type outputFormat: OutputDisposition.OutputFormat
:param options: output file options as per Python built-in 'open()'. Defaults to append/binary. The
latter for compatibility with Windows
:type options: String
'''
super(OutputFile, self).__init__(outputFormat)
# Make file name accessible as property just like
# Python file objects do:
self.name = fileName # @UnusedVariable
self.outputFormat = outputFormat
# Open the output file as 'append' and 'binary'
# The latter is needed for Windows.
self.fileHandle = open(fileName, options)
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
if outputFormat == OutputDisposition.OutputFormat.CSV or\
outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
# Prepare for CSV files needed for the tables:
self.tableCSVWriters = {}
def close(self):
self.fileHandle.close()
# Also close any CSV out files that might exist:
try:
for csvFD in self.csvTableFiles.values():
csvFD.close()
except:
pass
def flush(self):
self.fileHandle.flush()
for csvFD in self.tableCSVWriters.values():
try:
csvFD.flush()
except:
pass
def remove(self):
try:
os.remove(self.fileHandle.name)
except:
pass
def __str__(self):
return "<OutputFile:%s>" % self.getFileName()
def getFileName(self, tableName=None):
'''
Get file name of a MySQL INSERT statement outfile,
or, given a table name, the name of the outfile
for CSV destined to the given table.
:param tableName:
:type tableName:
'''
if tableName is None:
return self.name
else:
fd = self.csvTableFiles.get(tableName, None)
if fd is None:
return None
return fd.name
def writerow(self, colElementArray, tableName=None):
'''
How I wish Python had parameter type based polymorphism. Life
would be so much cleaner.
ColElementArray is either an array of values (coming from
a CSV-only parser), or a string that contains a complete
MySQL INSERT statement (from MySQL dump-creating parsers).
In the first case, we ensure all elements in the array are
strings, and write to output. In the latter case we write
the INSERT statements to their output file. Then, if output
format is SQL_INSERTS_AND_CSV, we also extract the MySQL
values and write them to the proper CSV file.
:param colElementArray: either a MySQL INSERT statement, or an array of values
:type colElementArray: {String | [string]}
:param tableName: name of table to which output is destined. Only needed for
value arrays from CSV-only parsers. Their value arrays don't contain
info on the destination table. INSERT statements do contain the destination table
name.
:type tableName: String
'''
if isinstance(colElementArray, list):
# Simple CSV array of values;
# make sure every array element is a string:
row = map(str,colElementArray)
if tableName is None:
# The main (and maybe only) table:
self.csvWriter.writerow(row)
else:
# One of the other tables for which files
# were opened during calls to startNewTable():
self.tableCSVWriters[tableName].writerow(row)
else:
# We are either outputting INSERT statements, or
# both those and CSV, or just CSV derived from a
# full MySQL INSERT parser, like edxTrackLogJSONParser.
# Start with the INSERTS:
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.fileHandle.write(colElementArray + '\n')
# If we are outputting either CSV or INSERTs and CSV, do the CSV
# part now:
if self.outputFormat != OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
# Strip the CSV parts out from the INSERT statement, which may
# contain multiple VALUE statements:
self.writeCSVRowsFromInsertStatement(colElementArray)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
:param whatToWrite:
:type whatToWrite:
'''
self.fileHandle.write(whatToWrite)
self.fileHandle.flush()
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table (in case of CSV-Only), or any table
in case of SQLInsert+CSV.
:param tableName: name of new table
:type tableName: string
:param schemaHintsNewTable: map column name to column SQL type
:type schemaHintsNewTable: {String,ColDataType}
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
return
# We are producing CSV (possibly in addition to Inserts):
try:
# Already have a table writer for this table?
self.tableCSVWriters[tableName]
return # yep
except KeyError:
# OK, really is a new table caller is starting:
pass
# Ensure that we have an open FD to write to for this table:
if self.outputFormat == OutputDisposition.OutputFormat.CSV or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.ensureOpenCSVOutFileFromTableName(tableName)
def ensureOpenCSVOutFileFromTableName(self, tableName):
'''
Checks whether an open File object exists for the given
table. If not, creates one. Returns the FD. The output
file is created in the same directory as self.out
:param tableName: name of table whose CSV output file we are to check for, or create
:type tableName: String
:return: a File object open for writing/appending
:rtype: File
'''
try:
# If we already have an FD for this table, return:
return self.tableCSVWriters[tableName]
except KeyError:
# Else create one below:
pass
outFileName = self.getFileName()
if outFileName == '/dev/null':
outFile = open('/dev/null', 'ab')
self.csvTableFiles[tableName] = outFile
return outFile
csvOutFileName = self.getCSVTableOutFileName(tableName)
outFile = open(csvOutFileName, 'w')
self.csvTableFiles[tableName] = outFile
self.tableCSVWriters[tableName] = csv.writer(outFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
return self.tableCSVWriters[tableName]
def getCSVTableOutFileName(self, tableName):
# The 'None' below ensures that we get the
# main file's name back:
return "%s_%sTable.csv" % (self.getFileName(None), tableName)
def writeCSVRowsFromInsertStatement(self, insertStatement):
'''
Takes one SQL INSERT INTO Statement, possibly including multiple VALUES
lines. Extracts the destination table and the values list(s), and writes
them to disk via the appropriate CSVWriter. The INSERT statements are
expected to be very regular, generated by json_to_relation. Don't use
this method for arbitrary INSERT statements, b/c it relies on regular
expressions that expect the specific format. Prerequisite: self.tableCSVWriters
is a dictionary that maps table names into File objects that are open
for writing.
:param insertStatement: Well-formed MySQL INSERT statement
:type insertStatement: String
@raise ValueError: if table name could not be extracted from the
INSERT statement, or if the insertStatement contains no VALUES
clause.
'''
inFD = StringIO.StringIO(insertStatement)
try:
firstLine = inFD.readline()
# Pick out the name of the table to which CSV is to be added:
tblNameMatch = OutputFile.TABLE_NAME_PATTERN.search(firstLine)
if tblNameMatch is None:
raise ValueError('No match when trying to extract table name from "%s"' % insertStatement)
tblName = tblNameMatch.group(1)
except IndexError:
raise ValueError('Could not extract table name from "%s"' % insertStatement)
readAllValueTuples = False
while not readAllValueTuples:
# Get values list that belongs to this insert statement:
valuesLine = inFD.readline()
if not valuesLine.startswith(' ('):
readAllValueTuples = True
continue
# Extract the comma-separated values list out from the parens;
# first get "'fasdrew_fdsaf...',...);\n":
oneValuesLineMatch = OutputFile.VALUES_PATTERN.search(valuesLine)
if oneValuesLineMatch is None:
# Hopefully never happens:
raise ValueError('No match for values line "%s"' % insertStatement)
# Get just the comma-separated values list from
# 'abfd_sfd,...);\n
valuesList = oneValuesLineMatch.group(1)[:-2] + '\n'
# Make sure we've seen additions to this table before or,
# if not, have a CSV writer and a file created to receive
# the CSV lines:
self.ensureOpenCSVOutFileFromTableName(tblName)
theOutFd = self.csvTableFiles[tblName]
theOutFd.write(valuesList) | '''
Housekeeping class. Each instance represents the name,
position, and datatype of one column. These instances are
used to generate column name headers, and
SQL insert statements.
'''
def __init__(self, colName, colDataType, jsonToRelationProcessor):
'''
Create a ColumnSpec instance.
:param colName: name of column
:type colName: String
:param colDataType: data type of column (an enum)
:type colDataType: ColumnSpec
:param jsonToRelationProcessor: associated JSON to relation JSONToRelation instance
:type jsonToRelationProcessor: JSONToRelation
'''
self.colName = colName
self.colDataType = colDataType
self.colPos = jsonToRelationProcessor.getNextNewColPos()
jsonToRelationProcessor.bumpNextNewColPos()
def getDefaultValue(self):
return ColDataType().defaultValues[self.colDataType]
def getName(self):
'''
Return column name
:return: name of column
:rtype: String
'''
return self.colName
def getType(self):
'''
Return SQL type
:return: SQL type of colum in upper case
:rtype: String
'''
return ColDataType().toString(self.colDataType).upper()
def getSQLDefSnippet(self):
'''
Return string snippet to use in SQL CREATE TABLE or ALTER TABLE
statement
'''
return " %s %s" % (self.getName(), self.getType())
def __str__(self):
    '''
    Return a human-readable summary, e.g. "<Col foo: TEXT (position 2)>".
    '''
    return "<Col %s: %s (position %s)>" % (self.colName,
                                           self.getType(),
                                           self.colPos)
def __repr__(self):
    '''
    Use the same representation as __str__().
    '''
    return self.__str__()
class TableSchemas(object):
    '''
    Repository for the schemas of all tables. A schema is an
    ordered dict mapping column names to ColumnSpec instances.
    Each such dict is associated with one relational table; the
    instance var allSchemas maps table names to schemas. The key
    None denotes the main (default) table.
    '''

    def __init__(self):
        self.allSchemas = OrderedDict()
        # Add empty schema for main (default) table:
        self.allSchemas[None] = OrderedDict()

    def __getitem__(self, tableName):
        return self.allSchemas[tableName]

    def __setitem__(self, tableName, colSpecsDict):
        self.allSchemas[tableName] = colSpecsDict

    def keys(self):
        return self.allSchemas.keys()

    def addColSpec(self, tableName, colSpec):
        '''
        Add one ColumnSpec to tableName's schema, creating the schema
        if this is the first column seen for that table.
        :param tableName: name of table the column belongs to (None for main table)
        :type tableName: {String | None}
        :param colSpec: the column specification to add
        :type colSpec: ColumnSpec
        '''
        try:
            schema = self.allSchemas[tableName]
        except KeyError:
            # BUG FIX: the original created a plain dict here, while
            # __init__() and addColSpecs() require OrderedDict; use
            # OrderedDict so column order is preserved consistently:
            schema = OrderedDict()
            self.allSchemas[tableName] = schema
        schema[colSpec.getName()] = colSpec

    def addColSpecs(self, tableName, colSpecsDict):
        '''
        Merge a dict of ColumnSpecs into tableName's schema, creating
        the schema if needed.
        :param tableName: name of table the columns belong to (None for main table)
        :type tableName: {String | None}
        :param colSpecsDict: map from column names to ColumnSpec instances
        :type colSpecsDict: OrderedDict<String,ColumnSpec>
        @raise ValueError: if colSpecsDict is not an OrderedDict
        '''
        if not isinstance(colSpecsDict, OrderedDict):
            raise ValueError("ColumSpec parameter must be a dictionary<ColName,ColumnSpec>")
        try:
            schema = self.allSchemas[tableName]
        except KeyError:
            self.allSchemas[tableName] = colSpecsDict
            schema = self.allSchemas[tableName]
        # Change schema to include the new dict:
        schema.update(colSpecsDict)
class ColumnSpec(object): | random_line_split |
output_disposition.py | '''
Created on Sep 14, 2013
@author: paepcke
Modifications:
- Jan 1, 2013: added remove() method to OutputFile
'''
import StringIO
from collections import OrderedDict
import csv
import re
import sys
import os
import tempfile
from col_data_type import ColDataType
class OutputDisposition(object):
    '''
    Specifications for where completed relation rows
    should be deposited, and in which format. Current
    output options are to files, and to stdout.
    This class is abstract, but make sure the subclasses
    invoke this super's __init__() when they are initialized.

    Also defined here are available output formats, of
    which there are two: CSV, and SQL insert statements AND
    CSV.

    NOTE: currently the CSV-only format option is broken. Not
    enough time to maintain it.

    SQL insert statements that are directed to files will also
    generate equivalent .csv files. The insert statement files
    will look like the result of a mysqldump, and inserts into
    different tables are mixed. The corresponding (values-only)
    csv files are split: one file for each table.
    '''

    def __init__(self, outputFormat, outputDestObj=None):
        '''
        :param outputFormat: one of the OutputDisposition.OutputFormat constants
        :type outputFormat: int
        :param outputDestObj: instance of one of the subclasses; None means
            this instance is itself the destination
        :type outputDestObj: {Subclass(OutputDisposition) | None}
        '''
        self.outputFormat = outputFormat
        if outputDestObj is None:
            self.outputDest = self
        else:
            self.outputDest = outputDestObj
        # Maps table name --> open File object receiving that table's CSV rows:
        self.csvTableFiles = {}
        # Column schemas for all tables seen so far:
        self.schemas = TableSchemas()

    def __enter__(self):
        return self.outputDest

    def __exit__(self, excType, excValue, excTraceback):
        try:
            self.outputDest.close()
        except:
            # If the conversion itself went fine, then
            # raise this exception from the closing attempt.
            # But if the conversion failed, then have the
            # system re-raise that earlier exception:
            if excValue is None:
                raise IOError("Could not close the output of the conversion: %s" % sys.exc_info()[0])
        # Return False to indicate that if the conversion
        # threw an error, the exception should now be re-raised.
        # If the conversion worked fine, then this return value
        # is ignored.
        return False

    def flush(self):
        self.outputDest.flush()

    def getOutputFormat(self):
        return self.outputFormat

    def addSchemaHints(self, tableName, schemaHints):
        '''
        Provide a schema hint dict for the table of the given name.
        :param tableName: name of table to which schema applies. The name may be None, in which case it refers to the main (default) table.
        :type tableName: String
        :param schemaHints: dict mapping column names to SQL types via ColumnSpec instances
        :type schemaHints: [ordered]Dict<String,ColumnSpec>
        '''
        self.schemas.addColSpecs(tableName, schemaHints)

    def getSchemaHint(self, colName, tableName):
        '''
        Given a column name, and a table name, return the ColumnSpec object
        that describes that column. If tableName is None, the main (default)
        table's schema will be searched for a colName entry
        :param colName: name of column whose schema info is sought
        :type colName: String
        :param tableName: name of table in which the given column resides
        :type tableName: String
        :return: the ColumnSpec instance for the column
        :rtype: ColumnSpec
        @raise KeyError: if table or column are not found
        '''
        return self.schemas[tableName][colName]

    def getSchemaHintByPos(self, pos, tableName):
        '''
        Return the ColumnSpec at position pos of tableName's schema,
        or None if that table has no schema.
        @raise ValueError: if pos is beyond the end of the schema
        '''
        try:
            return self.schemas[tableName].values()[pos]
        except (KeyError, ValueError):
            # BUG FIX: a missing table raises KeyError, which the
            # original did not catch even though returning None
            # was clearly the intent:
            return None
        except IndexError:
            # BUG FIX: the original applied '%' to the ValueError
            # instance rather than to the format string, so a
            # TypeError was raised instead of this ValueError:
            raise ValueError("Attempt to access pos %s in schema for table %s, which is shorter than %s: %s" %
                             (str(pos), tableName, str(pos), self.schemas[tableName].values()))

    def getSchema(self, tableName):
        try:
            return self.schemas[tableName].values()
        except (KeyError, ValueError):
            # BUG FIX: missing table raises KeyError; catch it so
            # None is returned as intended:
            return None

    def copySchemas(self, destDisposition):
        '''
        Given another instance of OutputDisposition,
        copy this instance's schemas to the destination.
        :param destDisposition: another instance of OutputDisposition
        :type destDisposition: OutputDisposition
        '''
        destDisposition.schemas = self.schemas

    def ensureColExistence(self, colName, colDataType, jsonToRelationConverter, tableName=None):
        '''
        Given a column name and MySQL datatype name, check whether this
        column has previously been encountered. If not, a column information
        object is created, which will eventually be used to create the column
        header, or SQL alter statements.
        :param colName: name of the column to consider
        :type colName: String
        :param colDataType: datatype of the column.
        :type colDataType: ColDataType
        :param jsonToRelationConverter: converter from which new column positions are allocated
        :type jsonToRelationConverter: JSONToRelation
        :param tableName: name of table to which the column is to belong; None if for main table
        :type tableName: {String | None}
        '''
        schemaDict = self.schemas[tableName]
        if schemaDict is None or len(schemaDict) == 0:
            # schema for this table definitely does not have the column:
            colSpecObj = ColumnSpec(colName, colDataType, jsonToRelationConverter)
            self.schemas[tableName] = OrderedDict({colName : colSpecObj})
            return
        # Have schema (dict) for this table. Does that dict contain
        # an entry for the col name?
        try:
            schemaDict[colName]
            # all set:
            return
        except KeyError:
            colSpecObj = ColumnSpec(colName, colDataType, jsonToRelationConverter)
            schemaDict[colName] = colSpecObj

    def createTmpTableFile(self, tableName, fileSuffix):
        '''
        Used for cases in which parsers must create more than one
        table. Those tables need to be written to disk, even when
        output of the main table is piped.
        :param tableName: name by which the table file obj can be retrieved
        :type tableName: String
        :param fileSuffix: suffix for temp file name. Ex. 'csv' for CSV outputs, or 'sql' for SQL dumps
        :type fileSuffix: String
        :return: file object open for writing
        :rtype: File
        '''
        self.csvTableFiles[tableName] = tempfile.NamedTemporaryFile(prefix='tmpTable',
                                                                    suffix=fileSuffix)
        return self.csvTableFiles[tableName]

    #--------------------- Available Output Formats

    class OutputFormat():
        CSV = 0
        SQL_INSERT_STATEMENTS = 1
        SQL_INSERTS_AND_CSV = 2
#--------------------- Available Output Destination Options:
class OutputPipe(OutputDisposition):
    '''
    Output destination that sends the main table's rows to stdout.
    Tables beyond the main one are written to temp files, since
    they cannot share the pipe.
    '''

    def __init__(self, outputFormat):
        super(OutputPipe, self).__init__(outputFormat)
        self.fileHandle = sys.stdout
        # Make file name accessible as property just like
        # Python file objects do:
        self.name = "<stdout>"  # @UnusedVariable
        self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        # Maps table name --> csv.writer for that (non-main) table:
        self.tableCSVWriters = {}

    def close(self):
        pass  # don't close stdout

    def flush(self):
        sys.stdout.flush()

    def __str__(self):
        # BUG FIX: the original returned "<OutputPipe:<stdout>" with
        # an unbalanced final '>':
        return "<OutputPipe:<stdout>>"

    def writerow(self, colElementArray, tableName=None):
        # For CSV: make sure everything is a string:
        if self.outputFormat == OutputDisposition.OutputFormat.CSV:
            row = map(str, colElementArray)
            if tableName is None:
                self.csvWriter.writerow(row)
            else:
                self.tableCSVWriters[tableName].writerow(row)
        else:
            print(colElementArray)

    def startNewTable(self, tableName, schemaHintsNewTable):
        '''
        Called when parser needs to create a table beyond
        the main table.
        :param tableName: name of the new table
        :type tableName: String
        :param schemaHintsNewTable: map column name to column SQL type
        :type schemaHintsNewTable: {String,ColDataType}
        '''
        self.addSchemaHints(tableName, schemaHintsNewTable)
        tmpTableFile = self.createTmpTableFile(tableName, 'csv')
        self.tableCSVWriters[tableName] = csv.writer(tmpTableFile,
                                                     dialect='excel',
                                                     delimiter=',',
                                                     quotechar='"',
                                                     quoting=csv.QUOTE_MINIMAL)

    def write(self, whatToWrite):
        '''
        Write given string straight to the output. No assumption made about the format
        :param whatToWrite: string to emit
        :type whatToWrite: String
        '''
        sys.stdout.write(whatToWrite)
        sys.stdout.flush()

    def getCSVTableOutFileName(self, tableName):
        # Main-table output always goes to the pipe itself:
        return self.name
class OutputFile(OutputDisposition):
    '''
    Output destination that writes conversion results to files on disk:
    a MySQL INSERT-statement dump file and/or one CSV file per table,
    depending on the output format chosen at construction.
    '''
    # When looking at INSERT INTO tableName (...,
    # grab 'tableName':
    TABLE_NAME_PATTERN = re.compile(r'[^\s]*\s[^\s]*\s([^\s]*)\s')
    # When looking at:" ('7a286e24_b578_4741_b6e0_c0e8596bd456','Mozil...);\n"
    # grab everything inside the parens, including the trailing ');\n', which
    # we'll cut out in the code:
    VALUES_PATTERN = re.compile(r'^[\s]{4}\(([^\n]*)\n{0,1}')

    def __init__(self, fileName, outputFormat, options='ab'):
        '''
        Create instance of an output file destination for converted log files.
        Such an instance is created both for OutputFormat.SQL_INSERT_STATEMENTS and
        for OutputFormat.CSV. In the Insert statements case the fileName is the file
        where all INSERT statements are placed; i.e. the entire dump. If the output format
        is CSV, then the fileName is a prefix for the file names of each generated CSV file
        (one file for each table).
        :param fileName: fully qualified name of output file for CSV (in case of CSV-only),
                         or MySQL INSERT statement dump
        :type fileName: String
        :param outputFormat: whether to output CSV or MySQL INSERT statements
        :type outputFormat: OutputDisposition.OutputFormat
        :param options: output file options as per Python built-in 'open()'. Defaults to append/binary. The
                        latter for compatibility with Windows
        :type options: String
        '''
        super(OutputFile, self).__init__(outputFormat)
        # Make file name accessible as property just like
        # Python file objects do:
        self.name = fileName  # @UnusedVariable
        self.outputFormat = outputFormat
        # Open the output file as 'append' and 'binary'
        # The latter is needed for Windows.
        self.fileHandle = open(fileName, options)
        # NOTE(review): this writer is bound to sys.stdout, not to
        # self.fileHandle, so main-table CSV rows written via
        # writerow() go to stdout rather than the file. Looks
        # unintended -- confirm before changing.
        self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        if outputFormat == OutputDisposition.OutputFormat.CSV or\
            outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
            # Prepare for CSV files needed for the tables:
            self.tableCSVWriters = {}

    def close(self):
        self.fileHandle.close()
        # Also close any CSV out files that might exist:
        try:
            for csvFD in self.csvTableFiles.values():
                csvFD.close()
        except:
            pass

    def flush(self):
        self.fileHandle.flush()
        # tableCSVWriters holds csv.writer objects, which have no
        # flush(); the broad except swallows the resulting
        # AttributeError:
        for csvFD in self.tableCSVWriters.values():
            try:
                csvFD.flush()
            except:
                pass

    def remove(self):
        # Delete the output file from disk, ignoring a missing file:
        try:
            os.remove(self.fileHandle.name)
        except:
            pass

    def __str__(self):
        return "<OutputFile:%s>" % self.getFileName()

    def getFileName(self, tableName=None):
        '''
        Get file name of a MySQL INSERT statement outfile,
        or, given a table name, the name of the outfile
        for CSV destined to the given table.
        :param tableName: name of table whose CSV file name is wanted, or
                          None for the main INSERT-statement file
        :type tableName: {String | None}
        :return: the file name, or None if no CSV file exists for tableName
        :rtype: {String | None}
        '''
        if tableName is None:
            return self.name
        else:
            fd = self.csvTableFiles.get(tableName, None)
            if fd is None:
                return None
            return fd.name

    def writerow(self, colElementArray, tableName=None):
        '''
        How I wish Python had parameter type based polymorphism. Life
        would be so much cleaner.
        ColElementArray is either an array of values (coming from
        a CSV-only parser), or a string that contains a complete
        MySQL INSERT statement (from MySQL dump-creating parsers).
        In the first case, we ensure all elements in the array are
        strings, and write to output. In the latter case we write
        the INSERT statements to their output file. Then, if output
        format is SQL_INSERTS_AND_CSV, we also extract the MySQL
        values and write them to the proper CSV file.
        :param colElementArray: either a MySQL INSERT statement, or an array of values
        :type colElementArray: {String | [string]}
        :param tableName: name of table to which output is destined. Only needed for
            value arrays from CSV-only parsers. Their value arrays don't contain
            info on the destination table. INSERT statements do contain the destination table
            name.
        :type tableName: String
        '''
        if isinstance(colElementArray, list):
            # Simple CSV array of values;
            # make sure every array element is a string:
            row = map(str,colElementArray)
            if tableName is None:
                # The main (and maybe only) table:
                self.csvWriter.writerow(row)
            else:
                # One of the other tables for which files
                # were opened during calls to startNewTable():
                self.tableCSVWriters[tableName].writerow(row)
        else:
            # We are either outputting INSERT statements, or
            # both those and CSV, or just CSV derived from a
            # full MySQL INSERT parser, like edxTrackLogJSONParser.
            # Start with the INSERTS:
            if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS or\
                self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
                self.fileHandle.write(colElementArray + '\n')
            # If we are outputting either CSV or INSERTs and CSV, do the CSV
            # part now:
            if self.outputFormat != OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
                # Strip the CSV parts out from the INSERT statement, which may
                # contain multiple VALUE statements:
                self.writeCSVRowsFromInsertStatement(colElementArray)

    def write(self, whatToWrite):
        '''
        Write given string straight to the output. No assumption made about the format
        :param whatToWrite: string to emit verbatim
        :type whatToWrite: String
        '''
        self.fileHandle.write(whatToWrite)
        self.fileHandle.flush()

    def startNewTable(self, tableName, schemaHintsNewTable):
        '''
        Called when parser needs to create a table beyond
        the main table (in case of CSV-Only), or any table
        in case of SQLInsert+CSV.
        :param tableName: name of new table
        :type tableName: string
        :param schemaHintsNewTable: map column name to column SQL type
        :type schemaHintsNewTable: {String,ColDataType}
        '''
        self.addSchemaHints(tableName, schemaHintsNewTable)
        if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
            return
        # We are producing CSV (possibly in addition to Inserts):
        try:
            # Already have a table writer for this table?
            self.tableCSVWriters[tableName]
            return # yep
        except KeyError:
            # OK, really is a new table caller is starting:
            pass
        # Ensure that we have an open FD to write to for this table:
        if self.outputFormat == OutputDisposition.OutputFormat.CSV or\
            self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
            self.ensureOpenCSVOutFileFromTableName(tableName)

    def ensureOpenCSVOutFileFromTableName(self, tableName):
        '''
        Checks whether an open File object exists for the given
        table. If not, creates one. Returns the FD. The output
        file is created in the same directory as self.out
        NOTE(review): the cache-hit and fresh-creation paths return a
        csv.writer, while the /dev/null path returns a File object --
        inconsistent with the ':rtype: File' below; confirm callers'
        expectations before changing.
        :param tableName: name of table whose CSV output file we are to check for, or create
        :type tableName: String
        :return: a File object open for writing/appending
        :rtype: File
        '''
        try:
            # If we already have an FD for this table, return:
            return self.tableCSVWriters[tableName]
        except KeyError:
            # Else create one below:
            pass
        outFileName = self.getFileName()
        if outFileName == '/dev/null':
            outFile = open('/dev/null', 'ab')
            self.csvTableFiles[tableName] = outFile
            return outFile
        csvOutFileName = self.getCSVTableOutFileName(tableName)
        outFile = open(csvOutFileName, 'w')
        self.csvTableFiles[tableName] = outFile
        self.tableCSVWriters[tableName] = csv.writer(outFile,
                                                     dialect='excel',
                                                     delimiter=',',
                                                     quotechar='"',
                                                     quoting=csv.QUOTE_MINIMAL)
        return self.tableCSVWriters[tableName]

    def getCSVTableOutFileName(self, tableName):
        # The 'None' below ensures that we get the
        # main file's name back:
        return "%s_%sTable.csv" % (self.getFileName(None), tableName)

    def writeCSVRowsFromInsertStatement(self, insertStatement):
        '''
        Takes one SQL INSERT INTO Statement, possibly including multiple VALUES
        lines. Extracts the destination table and the values list(s), and writes
        them to disk via the appropriate CSVWriter. The INSERT statements are
        expected to be very regular, generated by json_to_relation. Don't use
        this method for arbitrary INSERT statements, b/c it relies on regular
        expressions that expect the specific format. Prerequisite: self.tableCSVWriters
        is a dictionary that maps table names into File objects that are open
        for writing.
        :param insertStatement: Well-formed MySQL INSERT statement
        :type insertStatement: String
        @raise ValueError: if table name could not be extracted from the
            INSERT statement, or if the insertStatement contains no VALUES
            clause.
        '''
        inFD = StringIO.StringIO(insertStatement)
        try:
            firstLine = inFD.readline()
            # Pick out the name of the table to which CSV is to be added:
            tblNameMatch = OutputFile.TABLE_NAME_PATTERN.search(firstLine)
            if tblNameMatch is None:
                raise ValueError('No match when trying to extract table name from "%s"' % insertStatement)
            tblName = tblNameMatch.group(1)
        except IndexError:
            raise ValueError('Could not extract table name from "%s"' % insertStatement)
        readAllValueTuples = False
        while not readAllValueTuples:
            # Get values list that belongs to this insert statement:
            valuesLine = inFD.readline()
            if not valuesLine.startswith(' ('):
                readAllValueTuples = True
                continue
            # Extract the comma-separated values list out from the parens;
            # first get "'fasdrew_fdsaf...',...);\n":
            oneValuesLineMatch = OutputFile.VALUES_PATTERN.search(valuesLine)
            if oneValuesLineMatch is None:
                # Hopefully never happens:
                raise ValueError('No match for values line "%s"' % insertStatement)
            # Get just the comma-separated values list from
            # 'abfd_sfd,...);\n
            valuesList = oneValuesLineMatch.group(1)[:-2] + '\n'
            # Make sure we've seen additions to this table before or,
            # if not, have a CSV writer and a file created to receive
            # the CSV lines:
            self.ensureOpenCSVOutFileFromTableName(tblName)
            theOutFd = self.csvTableFiles[tblName]
            theOutFd.write(valuesList)
class ColumnSpec(object):
    '''
    Lightweight record describing one relational column: its name,
    SQL datatype, and position within its table. Instances are used
    when emitting column-name headers and SQL INSERT statements.
    '''

    def __init__(self, colName, colDataType, jsonToRelationProcessor):
        '''
        Create a ColumnSpec instance.
        :param colName: name of column
        :type colName: String
        :param colDataType: data type of column (an enum)
        :type colDataType: ColDataType
        :param jsonToRelationProcessor: associated JSONToRelation instance,
            from which this column's position is allocated
        :type jsonToRelationProcessor: JSONToRelation
        '''
        self.colName = colName
        self.colDataType = colDataType
        # Claim the next free column position, then advance the
        # processor's counter for the following column:
        self.colPos = jsonToRelationProcessor.getNextNewColPos()
        jsonToRelationProcessor.bumpNextNewColPos()

    def getDefaultValue(self):
        '''
        Return the SQL default value associated with this column's datatype.
        '''
        return ColDataType().defaultValues[self.colDataType]

    def getName(self):
        '''
        Return the column name.
        :rtype: String
        '''
        return self.colName

    def getType(self):
        '''
        Return the column's SQL type name in upper case.
        :rtype: String
        '''
        return ColDataType().toString(self.colDataType).upper()

    def getSQLDefSnippet(self):
        '''
        Return a "<name> <TYPE>" fragment for use inside SQL
        CREATE TABLE or ALTER TABLE statements.
        '''
        return " %s %s" % (self.getName(), self.getType())

    def __str__(self):
        return "<Col %s: %s (position %s)>" % (self.colName,
                                               self.getType(),
                                               self.colPos)

    # repr() output is identical to str() output:
    __repr__ = __str__
class TableSchemas(object):
    '''
    Repository for the schemas of all tables. A schema is an
    ordered dict mapping column names to ColumnSpec instances.
    Each such dict is associated with one relational table; the
    instance var allSchemas maps table names to schemas. The key
    None denotes the main (default) table.
    '''

    def __init__(self):
        self.allSchemas = OrderedDict()
        # Add empty schema for main (default) table:
        self.allSchemas[None] = OrderedDict()

    def __getitem__(self, tableName):
        return self.allSchemas[tableName]

    def __setitem__(self, tableName, colSpecsDict):
        self.allSchemas[tableName] = colSpecsDict

    def keys(self):
        return self.allSchemas.keys()

    def addColSpec(self, tableName, colSpec):
        '''
        Add one ColumnSpec to tableName's schema, creating the schema
        if this is the first column seen for that table.
        :param tableName: name of table the column belongs to (None for main table)
        :type tableName: {String | None}
        :param colSpec: the column specification to add
        :type colSpec: ColumnSpec
        '''
        try:
            schema = self.allSchemas[tableName]
        except KeyError:
            # BUG FIX: the original created a plain dict here, while
            # __init__() and addColSpecs() require OrderedDict; use
            # OrderedDict so column order is preserved consistently:
            schema = OrderedDict()
            self.allSchemas[tableName] = schema
        schema[colSpec.getName()] = colSpec

    def addColSpecs(self, tableName, colSpecsDict):
        '''
        Merge a dict of ColumnSpecs into tableName's schema, creating
        the schema if needed.
        :param tableName: name of table the columns belong to (None for main table)
        :type tableName: {String | None}
        :param colSpecsDict: map from column names to ColumnSpec instances
        :type colSpecsDict: OrderedDict<String,ColumnSpec>
        @raise ValueError: if colSpecsDict is not an OrderedDict
        '''
        if not isinstance(colSpecsDict, OrderedDict):
            raise ValueError("ColumSpec parameter must be a dictionary<ColName,ColumnSpec>")
        try:
            schema = self.allSchemas[tableName]
        except KeyError:
            self.allSchemas[tableName] = colSpecsDict
            schema = self.allSchemas[tableName]
        # Change schema to include the new dict:
        schema.update(colSpecsDict)
| raise IOError("Could not close the output of the conversion: %s" % sys.exc_info()[0]) | conditional_block |
output_disposition.py | '''
Created on Sep 14, 2013
@author: paepcke
Modifications:
- Jan 1, 2013: added remove() method to OutputFile
'''
import StringIO
from collections import OrderedDict
import csv
import re
import sys
import os
import tempfile
from col_data_type import ColDataType
class OutputDisposition(object):
    '''
    Specifications for where completed relation rows
    should be deposited, and in which format. Current
    output options are to files, and to stdout.
    This class is abstract, but make sure the subclasses
    invoke this super's __init__() when they are initialized.

    Also defined here are available output formats, of
    which there are two: CSV, and SQL insert statements AND
    CSV.

    NOTE: currently the CSV-only format option is broken. Not
    enough time to maintain it.

    SQL insert statements that are directed to files will also
    generate equivalent .csv files. The insert statement files
    will look like the result of a mysqldump, and inserts into
    different tables are mixed. The corresponding (values-only)
    csv files are split: one file for each table.
    '''

    def __init__(self, outputFormat, outputDestObj=None):
        '''
        :param outputFormat: one of the OutputDisposition.OutputFormat constants
        :type outputFormat: int
        :param outputDestObj: instance of one of the subclasses; None means
            this instance is itself the destination
        :type outputDestObj: {Subclass(OutputDisposition) | None}
        '''
        self.outputFormat = outputFormat
        if outputDestObj is None:
            self.outputDest = self
        else:
            self.outputDest = outputDestObj
        # Maps table name --> open File object receiving that table's CSV rows:
        self.csvTableFiles = {}
        # Column schemas for all tables seen so far:
        self.schemas = TableSchemas()

    def __enter__(self):
        return self.outputDest

    def __exit__(self, excType, excValue, excTraceback):
        try:
            self.outputDest.close()
        except:
            # If the conversion itself went fine, then
            # raise this exception from the closing attempt.
            # But if the conversion failed, then have the
            # system re-raise that earlier exception:
            if excValue is None:
                raise IOError("Could not close the output of the conversion: %s" % sys.exc_info()[0])
        # Return False to indicate that if the conversion
        # threw an error, the exception should now be re-raised.
        # If the conversion worked fine, then this return value
        # is ignored.
        return False

    def flush(self):
        self.outputDest.flush()

    def getOutputFormat(self):
        return self.outputFormat

    def addSchemaHints(self, tableName, schemaHints):
        '''
        Provide a schema hint dict for the table of the given name.
        :param tableName: name of table to which schema applies. The name may be None, in which case it refers to the main (default) table.
        :type tableName: String
        :param schemaHints: dict mapping column names to SQL types via ColumnSpec instances
        :type schemaHints: [ordered]Dict<String,ColumnSpec>
        '''
        self.schemas.addColSpecs(tableName, schemaHints)

    def getSchemaHint(self, colName, tableName):
        '''
        Given a column name, and a table name, return the ColumnSpec object
        that describes that column. If tableName is None, the main (default)
        table's schema will be searched for a colName entry
        :param colName: name of column whose schema info is sought
        :type colName: String
        :param tableName: name of table in which the given column resides
        :type tableName: String
        :return: the ColumnSpec instance for the column
        :rtype: ColumnSpec
        @raise KeyError: if table or column are not found
        '''
        return self.schemas[tableName][colName]

    def getSchemaHintByPos(self, pos, tableName):
        '''
        Return the ColumnSpec at position pos of tableName's schema,
        or None if that table has no schema.
        @raise ValueError: if pos is beyond the end of the schema
        '''
        try:
            return self.schemas[tableName].values()[pos]
        except (KeyError, ValueError):
            # BUG FIX: a missing table raises KeyError, which the
            # original did not catch even though returning None
            # was clearly the intent:
            return None
        except IndexError:
            # BUG FIX: the original applied '%' to the ValueError
            # instance rather than to the format string, so a
            # TypeError was raised instead of this ValueError:
            raise ValueError("Attempt to access pos %s in schema for table %s, which is shorter than %s: %s" %
                             (str(pos), tableName, str(pos), self.schemas[tableName].values()))

    def getSchema(self, tableName):
        try:
            return self.schemas[tableName].values()
        except (KeyError, ValueError):
            # BUG FIX: missing table raises KeyError; catch it so
            # None is returned as intended:
            return None

    def copySchemas(self, destDisposition):
        '''
        Given another instance of OutputDisposition,
        copy this instance's schemas to the destination.
        :param destDisposition: another instance of OutputDisposition
        :type destDisposition: OutputDisposition
        '''
        destDisposition.schemas = self.schemas

    def ensureColExistence(self, colName, colDataType, jsonToRelationConverter, tableName=None):
        '''
        Given a column name and MySQL datatype name, check whether this
        column has previously been encountered. If not, a column information
        object is created, which will eventually be used to create the column
        header, or SQL alter statements.
        :param colName: name of the column to consider
        :type colName: String
        :param colDataType: datatype of the column.
        :type colDataType: ColDataType
        :param jsonToRelationConverter: converter from which new column positions are allocated
        :type jsonToRelationConverter: JSONToRelation
        :param tableName: name of table to which the column is to belong; None if for main table
        :type tableName: {String | None}
        '''
        schemaDict = self.schemas[tableName]
        if schemaDict is None or len(schemaDict) == 0:
            # schema for this table definitely does not have the column:
            colSpecObj = ColumnSpec(colName, colDataType, jsonToRelationConverter)
            self.schemas[tableName] = OrderedDict({colName : colSpecObj})
            return
        # Have schema (dict) for this table. Does that dict contain
        # an entry for the col name?
        try:
            schemaDict[colName]
            # all set:
            return
        except KeyError:
            colSpecObj = ColumnSpec(colName, colDataType, jsonToRelationConverter)
            schemaDict[colName] = colSpecObj

    def createTmpTableFile(self, tableName, fileSuffix):
        '''
        Used for cases in which parsers must create more than one
        table. Those tables need to be written to disk, even when
        output of the main table is piped.
        :param tableName: name by which the table file obj can be retrieved
        :type tableName: String
        :param fileSuffix: suffix for temp file name. Ex. 'csv' for CSV outputs, or 'sql' for SQL dumps
        :type fileSuffix: String
        :return: file object open for writing
        :rtype: File
        '''
        self.csvTableFiles[tableName] = tempfile.NamedTemporaryFile(prefix='tmpTable',
                                                                    suffix=fileSuffix)
        return self.csvTableFiles[tableName]

    #--------------------- Available Output Formats

    class OutputFormat():
        CSV = 0
        SQL_INSERT_STATEMENTS = 1
        SQL_INSERTS_AND_CSV = 2
#--------------------- Available Output Destination Options:
class OutputPipe(OutputDisposition):
    '''
    Output destination that sends the main table's rows to stdout.
    Tables beyond the main one are written to temp files, since
    they cannot share the pipe.
    '''

    def __init__(self, outputFormat):
        super(OutputPipe, self).__init__(outputFormat)
        self.fileHandle = sys.stdout
        # Make file name accessible as property just like
        # Python file objects do:
        self.name = "<stdout>"  # @UnusedVariable
        self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        # Maps table name --> csv.writer for that (non-main) table:
        self.tableCSVWriters = {}

    def close(self):
        pass  # don't close stdout

    def flush(self):
        sys.stdout.flush()

    def __str__(self):
        # BUG FIX: the original returned "<OutputPipe:<stdout>" with
        # an unbalanced final '>':
        return "<OutputPipe:<stdout>>"

    def writerow(self, colElementArray, tableName=None):
        # For CSV: make sure everything is a string:
        if self.outputFormat == OutputDisposition.OutputFormat.CSV:
            row = map(str, colElementArray)
            if tableName is None:
                self.csvWriter.writerow(row)
            else:
                self.tableCSVWriters[tableName].writerow(row)
        else:
            print(colElementArray)

    def startNewTable(self, tableName, schemaHintsNewTable):
        '''
        Called when parser needs to create a table beyond
        the main table.
        :param tableName: name of the new table
        :type tableName: String
        :param schemaHintsNewTable: map column name to column SQL type
        :type schemaHintsNewTable: {String,ColDataType}
        '''
        self.addSchemaHints(tableName, schemaHintsNewTable)
        tmpTableFile = self.createTmpTableFile(tableName, 'csv')
        self.tableCSVWriters[tableName] = csv.writer(tmpTableFile,
                                                     dialect='excel',
                                                     delimiter=',',
                                                     quotechar='"',
                                                     quoting=csv.QUOTE_MINIMAL)

    def write(self, whatToWrite):
        '''
        Write given string straight to the output. No assumption made about the format
        :param whatToWrite: string to emit
        :type whatToWrite: String
        '''
        sys.stdout.write(whatToWrite)
        sys.stdout.flush()

    def getCSVTableOutFileName(self, tableName):
        # Main-table output always goes to the pipe itself:
        return self.name
class OutputFile(OutputDisposition):
# When looking at INSERT INTO tableName (...,
# grab 'tableName':
TABLE_NAME_PATTERN = re.compile(r'[^\s]*\s[^\s]*\s([^\s]*)\s')
# When looking at:" ('7a286e24_b578_4741_b6e0_c0e8596bd456','Mozil...);\n"
# grab everything inside the parens, including the trailing ');\n', which
# we'll cut out in the code:
VALUES_PATTERN = re.compile(r'^[\s]{4}\(([^\n]*)\n{0,1}')
def __init__(self, fileName, outputFormat, options='ab'):
'''
Create instance of an output file destination for converted log files.
Such an instance is created both for OutputFormat.SQL_INSERT_STATEMENTS and
for OutputFormat.CSV. In the Insert statements case the fileName is the file
where all INSERT statements are placed; i.e. the entire dump. If the output format
is CSV, then the fileName is a prefix for the file names of each generated CSV file
(one file for each table).
:param fileName: fully qualified name of output file for CSV (in case of CSV-only),
or MySQL INSERT statement dump
:type fileName: String
:param outputFormat: whether to output CSV or MySQL INSERT statements
:type outputFormat: OutputDisposition.OutputFormat
:param options: output file options as per Python built-in 'open()'. Defaults to append/binary. The
latter for compatibility with Windows
:type options: String
'''
super(OutputFile, self).__init__(outputFormat)
# Make file name accessible as property just like
# Python file objects do:
self.name = fileName # @UnusedVariable
self.outputFormat = outputFormat
# Open the output file as 'append' and 'binary'
# The latter is needed for Windows.
self.fileHandle = open(fileName, options)
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
if outputFormat == OutputDisposition.OutputFormat.CSV or\
outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
# Prepare for CSV files needed for the tables:
self.tableCSVWriters = {}
def close(self):
self.fileHandle.close()
# Also close any CSV out files that might exist:
try:
for csvFD in self.csvTableFiles.values():
csvFD.close()
except:
pass
def flush(self):
self.fileHandle.flush()
for csvFD in self.tableCSVWriters.values():
try:
csvFD.flush()
except:
pass
def remove(self):
try:
os.remove(self.fileHandle.name)
except:
pass
def __str__(self):
return "<OutputFile:%s>" % self.getFileName()
def getFileName(self, tableName=None):
'''
Get file name of a MySQL INSERT statement outfile,
or, given a table name, the name of the outfile
for CSV destined to the given table.
:param tableName:
:type tableName:
'''
if tableName is None:
return self.name
else:
fd = self.csvTableFiles.get(tableName, None)
if fd is None:
return None
return fd.name
def writerow(self, colElementArray, tableName=None):
'''
How I wish Python had parameter type based polymorphism. Life
would be so much cleaner.
ColElementArray is either an array of values (coming from
a CSV-only parser), or a string that contains a complete
MySQL INSERT statement (from MySQL dump-creating parsers).
In the first case, we ensure all elements in the array are
strings, and write to output. In the latter case we write
the INSERT statements to their output file. Then, if output
format is SQL_INSERTS_AND_CSV, we also extract the MySQL
values and write them to the proper CSV file.
:param colElementArray: either a MySQL INSERT statement, or an array of values
:type colElementArray: {String | [string]}
:param tableName: name of table to which output is destined. Only needed for
value arrays from CSV-only parsers. Their value arrays don't contain
info on the destination table. INSERT statements do contain the destination table
name.
:type tableName: String
'''
if isinstance(colElementArray, list):
# Simple CSV array of values;
# make sure every array element is a string:
row = map(str,colElementArray)
if tableName is None:
# The main (and maybe only) table:
self.csvWriter.writerow(row)
else:
# One of the other tables for which files
# were opened during calls to startNewTable():
self.tableCSVWriters[tableName].writerow(row)
else:
# We are either outputting INSERT statements, or
# both those and CSV, or just CSV derived from a
# full MySQL INSERT parser, like edxTrackLogJSONParser.
# Start with the INSERTS:
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.fileHandle.write(colElementArray + '\n')
# If we are outputting either CSV or INSERTs and CSV, do the CSV
# part now:
if self.outputFormat != OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
# Strip the CSV parts out from the INSERT statement, which may
# contain multiple VALUE statements:
self.writeCSVRowsFromInsertStatement(colElementArray)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
:param whatToWrite:
:type whatToWrite:
'''
self.fileHandle.write(whatToWrite)
self.fileHandle.flush()
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table (in case of CSV-Only), or any table
in case of SQLInsert+CSV.
:param tableName: name of new table
:type tableName: string
:param schemaHintsNewTable: map column name to column SQL type
:type schemaHintsNewTable: {String,ColDataType}
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
return
# We are producing CSV (possibly in addition to Inserts):
try:
# Already have a table writer for this table?
self.tableCSVWriters[tableName]
return # yep
except KeyError:
# OK, really is a new table caller is starting:
pass
# Ensure that we have an open FD to write to for this table:
if self.outputFormat == OutputDisposition.OutputFormat.CSV or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.ensureOpenCSVOutFileFromTableName(tableName)
def ensureOpenCSVOutFileFromTableName(self, tableName):
'''
Checks whether an open File object exists for the given
table. If not, creates one. Returns the FD. The output
file is created in the same directory as self.out
:param tableName: name of table whose CSV output file we are to check for, or create
:type tableName: String
:return: a File object open for writing/appending
:rtype: File
'''
try:
# If we already have an FD for this table, return:
return self.tableCSVWriters[tableName]
except KeyError:
# Else create one below:
pass
outFileName = self.getFileName()
if outFileName == '/dev/null':
outFile = open('/dev/null', 'ab')
self.csvTableFiles[tableName] = outFile
return outFile
csvOutFileName = self.getCSVTableOutFileName(tableName)
outFile = open(csvOutFileName, 'w')
self.csvTableFiles[tableName] = outFile
self.tableCSVWriters[tableName] = csv.writer(outFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
return self.tableCSVWriters[tableName]
def getCSVTableOutFileName(self, tableName):
# The 'None' below ensures that we get the
# main file's name back:
return "%s_%sTable.csv" % (self.getFileName(None), tableName)
def writeCSVRowsFromInsertStatement(self, insertStatement):
'''
Takes one SQL INSERT INTO Statement, possibly including multiple VALUES
lines. Extracts the destination table and the values list(s), and writes
them to disk via the appropriate CSVWriter. The INSERT statements are
expected to be very regular, generated by json_to_relation. Don't use
this method for arbitrary INSERT statements, b/c it relies on regular
expressions that expect the specific format. Prerequisite: self.tableCSVWriters
is a dictionary that maps table names into File objects that are open
for writing.
:param insertStatement: Well-formed MySQL INSERT statement
:type insertStatement: String
@raise ValueError: if table name could not be extracted from the
INSERT statement, or if the insertStatement contains no VALUES
clause.
'''
inFD = StringIO.StringIO(insertStatement)
try:
firstLine = inFD.readline()
# Pick out the name of the table to which CSV is to be added:
tblNameMatch = OutputFile.TABLE_NAME_PATTERN.search(firstLine)
if tblNameMatch is None:
raise ValueError('No match when trying to extract table name from "%s"' % insertStatement)
tblName = tblNameMatch.group(1)
except IndexError:
raise ValueError('Could not extract table name from "%s"' % insertStatement)
readAllValueTuples = False
while not readAllValueTuples:
# Get values list that belongs to this insert statement:
valuesLine = inFD.readline()
if not valuesLine.startswith(' ('):
readAllValueTuples = True
continue
# Extract the comma-separated values list out from the parens;
# first get "'fasdrew_fdsaf...',...);\n":
oneValuesLineMatch = OutputFile.VALUES_PATTERN.search(valuesLine)
if oneValuesLineMatch is None:
# Hopefully never happens:
raise ValueError('No match for values line "%s"' % insertStatement)
# Get just the comma-separated values list from
# 'abfd_sfd,...);\n
valuesList = oneValuesLineMatch.group(1)[:-2] + '\n'
# Make sure we've seen additions to this table before or,
# if not, have a CSV writer and a file created to receive
# the CSV lines:
self.ensureOpenCSVOutFileFromTableName(tblName)
theOutFd = self.csvTableFiles[tblName]
theOutFd.write(valuesList)
class ColumnSpec(object):
'''
Housekeeping class. Each instance represents the name,
position, and datatype of one column. These instances are
used to generate column name headers, and
SQL insert statements.
'''
def __init__(self, colName, colDataType, jsonToRelationProcessor):
'''
Create a ColumnSpec instance.
:param colName: name of column
:type colName: String
:param colDataType: data type of column (an enum)
:type colDataType: ColumnSpec
:param jsonToRelationProcessor: associated JSON to relation JSONToRelation instance
:type jsonToRelationProcessor: JSONToRelation
'''
self.colName = colName
self.colDataType = colDataType
self.colPos = jsonToRelationProcessor.getNextNewColPos()
jsonToRelationProcessor.bumpNextNewColPos()
def getDefaultValue(self):
return ColDataType().defaultValues[self.colDataType]
def getName(self):
'''
Return column name
:return: name of column
:rtype: String
'''
return self.colName
def getType(self):
'''
Return SQL type
:return: SQL type of colum in upper case
:rtype: String
'''
return ColDataType().toString(self.colDataType).upper()
def getSQLDefSnippet(self):
'''
Return string snippet to use in SQL CREATE TABLE or ALTER TABLE
statement
'''
return " %s %s" % (self.getName(), self.getType())
def __str__(self):
return "<Col %s: %s (position %s)>" % (self.colName,
self.getType(),
self.colPos)
def __repr__(self):
return self.__str__()
class TableSchemas(object):
'''
Repository for the schemas of all tables. A schema is an
array ColumnSpec instances. Each such list is associated with
one relational table. A class var dict holds the schemas for
all tables.
'''
def __init__(self):
self.allSchemas = OrderedDict()
# Add empty schema for main (default) table:
self.allSchemas[None] = OrderedDict()
def __getitem__(self, tableName):
return self.allSchemas[tableName]
def __setitem__(self, tableName, colSpecsDict):
self.allSchemas[tableName] = colSpecsDict
def keys(self):
return self.allSchemas.keys()
def addColSpec(self, tableName, colSpec):
try:
schema = self.allSchemas[tableName]
except KeyError:
self.allSchemas[tableName] = {colSpec.getName() : colSpec}
schema = self.allSchemas[tableName]
schema[colSpec.getName()] = colSpec
def addColSpecs(self, tableName, colSpecsDict):
if not isinstance(colSpecsDict, OrderedDict):
raise ValueError("ColumSpec parameter must be a dictionary<ColName,ColumnSpec>")
try:
schema = self.allSchemas[tableName]
except KeyError:
self.allSchemas[tableName] = colSpecsDict
schema = self.allSchemas[tableName]
# Change schema to include the new dict:
schema.update(colSpecsDict)
| '''
Provide a schema hint dict for the table of the given name.
:param tableName: name of table to which schema applies. The name may be None, in which case it refers to the main (default) table.
:type tableName: String
:param schemaHints: dict mapping column names to SQL types via ColumnSpec instances
:type schemaHints: [ordered]Dict<String,ColumnSpec>
'''
self.schemas.addColSpecs(tableName, schemaHints) | identifier_body |
test-examples.js | const path = require('path')
const execa = require('execa')
const logger = require('./lib/logger')
const { exampleAppsToRun } = require('./lib/paths')
const { createBundle } = require('./lib/bundle')
const bundle = createBundle()
const executeTest = (projectPath) => {
// we change current directory
process.chdir(projectPath)
// reading package.json
const projectPkg = require(path.join(projectPath, 'package.json'))
if (!projectPkg.name) projectPkg.name = 'unknown'
if (!projectPkg.version) projectPkg.version = 'unknown'
logger.log()
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
logger.log('='.repeat(20), `${projectPkg.name}@${projectPkg.version}`, 'in', projectPath, '='.repeat(20))
logger.log() |
// then we install it in the repo
logger.log('ensuring all dependencies of target project are installed')
logger.log()
execa.sync('npm', ['ci'], { cwd: projectPath })
logger.log()
logger.log('installing bundled version of ts-jest')
logger.log()
execa.sync('npm', ['install', '--no-package-lock', '--no-shrinkwrap', '--no-save', bundle], { cwd: projectPath })
logger.log()
// then we can run the tests
const cmdLine = ['npm', 'run', 'test']
const cmdIsolatedLine = ['npm', 'run', 'test-isolated']
const cmdESMLine = ['npm', 'run', 'test-esm']
const cmdESMIsolatedLine = ['npm', 'run', 'test-esm-isolated']
logger.log('starting the CommonJS tests with `isolatedModules: false` using:', ...cmdLine)
logger.log()
execa.sync(cmdLine.shift(), cmdLine, {
cwd: projectPath,
stdio: 'inherit',
env: process.env,
})
logger.log()
logger.log('starting the CommonJS tests with `isolatedModules: true` using:', ...cmdIsolatedLine)
logger.log()
execa.sync(cmdIsolatedLine.shift(), cmdIsolatedLine, {
cwd: projectPath,
stdio: 'inherit',
env: process.env,
})
logger.log()
logger.log('starting the ESM tests with `isolatedModules: false` using:', ...cmdESMLine)
logger.log()
execa.sync(cmdESMLine.shift(), cmdESMLine, {
cwd: projectPath,
stdio: 'inherit',
env: process.env,
})
logger.log()
logger.log('starting the ESM tests with `isolatedModules: true` using:', ...cmdESMIsolatedLine)
logger.log()
execa.sync(cmdESMIsolatedLine.shift(), cmdESMIsolatedLine, {
cwd: projectPath,
stdio: 'inherit',
env: process.env,
})
logger.log()
}
exampleAppsToRun.forEach((projectPath) => {
executeTest(projectPath)
}) | random_line_split |
|
htmlhrelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLHRElementBinding::{self, HTMLHRElementMethods};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::attr::{AttrValue, LengthOrPercentageOrAuto};
#[dom_struct] | }
impl HTMLHRElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLHRElement {
HTMLHRElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLHRElement> {
Node::reflect_node(
Box::new(HTMLHRElement::new_inherited(local_name, prefix, document)),
document,
HTMLHRElementBinding::Wrap,
)
}
}
impl HTMLHRElementMethods for HTMLHRElement {
// https://html.spec.whatwg.org/multipage/#dom-hr-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-hr-align
make_atomic_setter!(SetAlign, "align");
// https://html.spec.whatwg.org/multipage/#dom-hr-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-hr-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-hr-width
make_getter!(Width, "width");
// https://html.spec.whatwg.org/multipage/#dom-hr-width
make_dimension_setter!(SetWidth, "width");
}
pub trait HTMLHRLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_width(&self) -> LengthOrPercentageOrAuto;
}
impl HTMLHRLayoutHelpers for LayoutDom<HTMLHRElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_width(&self) -> LengthOrPercentageOrAuto {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("width"))
.map(AttrValue::as_dimension)
.cloned()
.unwrap_or(LengthOrPercentageOrAuto::Auto)
}
}
}
impl VirtualMethods for HTMLHRElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("align") => AttrValue::from_dimension(value.into()),
&local_name!("color") => AttrValue::from_legacy_color(value.into()),
&local_name!("width") => AttrValue::from_dimension(value.into()),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(name, value),
}
}
} | pub struct HTMLHRElement {
htmlelement: HTMLElement, | random_line_split |
htmlhrelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLHRElementBinding::{self, HTMLHRElementMethods};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::attr::{AttrValue, LengthOrPercentageOrAuto};
#[dom_struct]
pub struct HTMLHRElement {
htmlelement: HTMLElement,
}
impl HTMLHRElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLHRElement {
HTMLHRElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLHRElement> {
Node::reflect_node(
Box::new(HTMLHRElement::new_inherited(local_name, prefix, document)),
document,
HTMLHRElementBinding::Wrap,
)
}
}
impl HTMLHRElementMethods for HTMLHRElement {
// https://html.spec.whatwg.org/multipage/#dom-hr-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-hr-align
make_atomic_setter!(SetAlign, "align");
// https://html.spec.whatwg.org/multipage/#dom-hr-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-hr-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-hr-width
make_getter!(Width, "width");
// https://html.spec.whatwg.org/multipage/#dom-hr-width
make_dimension_setter!(SetWidth, "width");
}
pub trait HTMLHRLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_width(&self) -> LengthOrPercentageOrAuto;
}
impl HTMLHRLayoutHelpers for LayoutDom<HTMLHRElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn | (&self) -> LengthOrPercentageOrAuto {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("width"))
.map(AttrValue::as_dimension)
.cloned()
.unwrap_or(LengthOrPercentageOrAuto::Auto)
}
}
}
impl VirtualMethods for HTMLHRElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("align") => AttrValue::from_dimension(value.into()),
&local_name!("color") => AttrValue::from_legacy_color(value.into()),
&local_name!("width") => AttrValue::from_dimension(value.into()),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(name, value),
}
}
}
| get_width | identifier_name |
BreakfastDiningRounded.js | import createSvgIcon from './utils/createSvgIcon';
import { jsx as _jsx } from "react/jsx-runtime";
export default createSvgIcon( /*#__PURE__*/_jsx("path", {
d: "M18 3H6C3.79 3 2 4.79 2 7c0 1.48.81 2.75 2 3.45V19c0 1.1.9 2 2 2h12c1.1 0 2-.9 2-2v-8.55c1.19-.69 2-1.97 2-3.45 0-2.21-1.79-4-4-4zm-2.29 10.7-3 3c-.39.39-1.02.39-1.42 0l-3-3a.9959.9959 0 0 1 0-1.41l3-3c.39-.39 1.02-.39 1.41 0l3 3c.4.39.4 1.02.01 1.41z" | }), 'BreakfastDiningRounded'); | random_line_split |
|
query-result-stores.js | /* eslint-disable no-await-in-loop */
const assert = require('assert');
const TestUtils = require('../utils');
const query1 = `SELECT 1 AS id, 'blue' AS color`;
function wait(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
async function testBatchToCompletion(config) {
const utils = new TestUtils(config);
await utils.init(true);
const connection = await utils.post('admin', '/api/connections', {
name: 'test connection',
driver: 'sqlite',
data: {
filename: './test/fixtures/sales.sqlite',
},
});
let batch = await utils.post('admin', `/api/batches`, {
connectionId: connection.id,
batchText: query1,
});
while (batch.status !== 'finished' && batch.status !== 'error') {
await wait(25);
batch = await utils.get('admin', `/api/batches/${batch.id}`);
}
const statements = await utils.get(
'admin',
`/api/batches/${batch.id}/statements`
);
const statement1 = statements[0];
let result1 = await utils.get(
'admin',
`/api/statements/${statement1.id}/results`
);
assert.deepEqual(result1, [[1, 'blue']], 'results as expected');
// remove should succeed
await utils.models.statements.removeById(statement1.id);
await utils.get('admin', `/api/statements/${statement1.id}/results`, 404);
}
describe('api/query-result-stores', function () {
it('file', async function () {
return testBatchToCompletion({ queryResultStore: 'file' });
});
it('redis', async function () {
const available = await TestUtils.redisAvailable('redis://localhost:6379');
if (!available || process.env.SKIP_INTEGRATION === 'true') |
return testBatchToCompletion({
queryResultStore: 'redis',
redisUri: 'redis://localhost:6379',
});
});
it('database', async function () {
return testBatchToCompletion({
queryResultStore: 'database',
});
});
it('memory', async function () {
return testBatchToCompletion({
queryResultStore: 'memory',
});
});
});
| {
return this.skip();
} | conditional_block |
query-result-stores.js | /* eslint-disable no-await-in-loop */
const assert = require('assert');
const TestUtils = require('../utils');
const query1 = `SELECT 1 AS id, 'blue' AS color`;
function wait(ms) |
async function testBatchToCompletion(config) {
const utils = new TestUtils(config);
await utils.init(true);
const connection = await utils.post('admin', '/api/connections', {
name: 'test connection',
driver: 'sqlite',
data: {
filename: './test/fixtures/sales.sqlite',
},
});
let batch = await utils.post('admin', `/api/batches`, {
connectionId: connection.id,
batchText: query1,
});
while (batch.status !== 'finished' && batch.status !== 'error') {
await wait(25);
batch = await utils.get('admin', `/api/batches/${batch.id}`);
}
const statements = await utils.get(
'admin',
`/api/batches/${batch.id}/statements`
);
const statement1 = statements[0];
let result1 = await utils.get(
'admin',
`/api/statements/${statement1.id}/results`
);
assert.deepEqual(result1, [[1, 'blue']], 'results as expected');
// remove should succeed
await utils.models.statements.removeById(statement1.id);
await utils.get('admin', `/api/statements/${statement1.id}/results`, 404);
}
describe('api/query-result-stores', function () {
it('file', async function () {
return testBatchToCompletion({ queryResultStore: 'file' });
});
it('redis', async function () {
const available = await TestUtils.redisAvailable('redis://localhost:6379');
if (!available || process.env.SKIP_INTEGRATION === 'true') {
return this.skip();
}
return testBatchToCompletion({
queryResultStore: 'redis',
redisUri: 'redis://localhost:6379',
});
});
it('database', async function () {
return testBatchToCompletion({
queryResultStore: 'database',
});
});
it('memory', async function () {
return testBatchToCompletion({
queryResultStore: 'memory',
});
});
});
| {
return new Promise((resolve) => setTimeout(resolve, ms));
} | identifier_body |
query-result-stores.js | /* eslint-disable no-await-in-loop */
const assert = require('assert');
const TestUtils = require('../utils'); |
const query1 = `SELECT 1 AS id, 'blue' AS color`;
function wait(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
async function testBatchToCompletion(config) {
const utils = new TestUtils(config);
await utils.init(true);
const connection = await utils.post('admin', '/api/connections', {
name: 'test connection',
driver: 'sqlite',
data: {
filename: './test/fixtures/sales.sqlite',
},
});
let batch = await utils.post('admin', `/api/batches`, {
connectionId: connection.id,
batchText: query1,
});
while (batch.status !== 'finished' && batch.status !== 'error') {
await wait(25);
batch = await utils.get('admin', `/api/batches/${batch.id}`);
}
const statements = await utils.get(
'admin',
`/api/batches/${batch.id}/statements`
);
const statement1 = statements[0];
let result1 = await utils.get(
'admin',
`/api/statements/${statement1.id}/results`
);
assert.deepEqual(result1, [[1, 'blue']], 'results as expected');
// remove should succeed
await utils.models.statements.removeById(statement1.id);
await utils.get('admin', `/api/statements/${statement1.id}/results`, 404);
}
describe('api/query-result-stores', function () {
it('file', async function () {
return testBatchToCompletion({ queryResultStore: 'file' });
});
it('redis', async function () {
const available = await TestUtils.redisAvailable('redis://localhost:6379');
if (!available || process.env.SKIP_INTEGRATION === 'true') {
return this.skip();
}
return testBatchToCompletion({
queryResultStore: 'redis',
redisUri: 'redis://localhost:6379',
});
});
it('database', async function () {
return testBatchToCompletion({
queryResultStore: 'database',
});
});
it('memory', async function () {
return testBatchToCompletion({
queryResultStore: 'memory',
});
});
}); | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.