file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
xsp-info.ts
|
/*
Copyright(c) 2015 - 2021 3NSoft Inc.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* This file contains code for working with file headers and (un)packing
* file segments.
* Exported utility should be used inside xsp library.
*/
import { calculateNonce, POLY_LENGTH, NONCE_LENGTH } from '../utils/crypt-utils';
import { assert } from '../utils/assert';
import { makeUint8ArrayCopy } from '../utils/buffer-utils';
export interface SegsInfo {
/**
* Common segment size before encryption. Encrypted segment is poly-bytes
* longer.
* Last segments in segment chains may be smaller than this value.
*/
segSize: number;
/**
* Array with info objects about chains of segments with related nonces.
* This array shall have zero elements, if file is empty.
* If it is an endless file, then the last segment chain is endless.
*/
segChains: SegsChainInfo[];
formatVersion: number;
}
export interface AttrSegInfo {
nonce: Uint8Array;
size: number;
}
export interface FiniteSegsChainInfo {
nonce: Uint8Array;
numOfSegs: number;
lastSegSize: number;
isEndless?: undefined;
}
export interface EndlessSegsChainInfo {
nonce: Uint8Array;
isEndless: true;
}
export type SegsChainInfo = FiniteSegsChainInfo|EndlessSegsChainInfo;
export function headerContentFor(s: SegsInfo, pads: number): Uint8Array {
assert(Number.isInteger(pads) && (pads >= 0));
if ((s.formatVersion === 1)
|| (s.formatVersion === 2)) {
return assembleV1andV2HeaderContent(s, pads);
} else {
throw new Error(`Version ${s.formatVersion} is not known`);
}
}
const V_1_2_CHAIN_LEN_IN_H = 3 + 4 + NONCE_LENGTH;
function assembleV1andV2HeaderContent(s: SegsInfo, pads: number): Uint8Array {
const headerLen = 1 + 2 + V_1_2_CHAIN_LEN_IN_H*(s.segChains.length + pads);
const h = new Uint8Array(headerLen);
let pos = 0;
// 1) version byte
h[pos] = s.formatVersion;
pos += 1;
// 3) segment size in 256 byte units
storeUintIn2Bytes(h, pos, s.segSize >>> 8);
pos += 2;
// 4.1) pads: array h is already initialized to all zeros
pos += V_1_2_CHAIN_LEN_IN_H*pads;
// 4.2) segment chains
for (let i=0; i<s.segChains.length; i+=1) {
const chainInfo = s.segChains[i];
// 4.1) number of segments in the chain
const numOfSegs = (chainInfo.isEndless ?
MAX_SEG_INDEX : chainInfo.numOfSegs);
storeUintIn4Bytes(h, pos, numOfSegs);
pos += 4;
// 4.2) last segment size
const lastSegSize = (chainInfo.isEndless ?
s.segSize : chainInfo.lastSegSize);
storeUintIn3Bytes(h, pos, lastSegSize);
pos += 3;
// 4.3) 1st segment nonce
h.set(chainInfo.nonce, pos);
pos += chainInfo.nonce.length;
}
return h;
}
/**
* @param x
* @param i
* @return unsigned 16-bit integer (2 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom2Bytes(x: Uint8Array, i: number): number {
return (x[i] << 8) | x[i+1];
}
/**
* @param x
* @param i
* @param u is an unsigned 16-bit integer (2 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn2Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 8;
x[i+1] = u;
}
/**
* @param x
* @param i
* @return unsigned 24-bit integer (3 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom3Bytes(x: Uint8Array, i: number): number {
return (x[i] << 16) | (x[i+1] << 8) | x[i+2];
}
/**
* @param x
* @param i
* @param u is an unsigned 24-bit integer (3 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn3Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 16;
x[i+1] = u >>> 8;
x[i+2] = u;
}
/**
* @param x
* @param i
* @return unsigned 32-bit integer (4 bytes), stored big-endian way in x,
* starting at index i.
*/
export function loadUintFrom4Bytes(x: Uint8Array, i: number): number {
// Note that (x << 24) may produce negative number, probably due to
// treating intermediate integer as signed, and pulling sign to resulting
// float number. Hence, we need a bit different operation here.
return x[i]*0x1000000 + ((x[i+1] << 16) | (x[i+2] << 8) | x[i+3]);
}
/**
* @param x
* @param i
* @param u is an unsigned 32-bit integer (4 bytes) to be stored big-endian
* way in x, starting at index i.
*/
export function storeUintIn4Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 24;
x[i+1] = u >>> 16;
x[i+2] = u >>> 8;
x[i+3] = u;
}
export function readSegsInfoFromHeader(h: Uint8Array): SegsInfo {
if (h.length < 1) { throw inputException(``); }
const v = h[0];
if ((v === 1) || (v === 2)) {
return readV1orV2Header(h);
} else {
throw inputException(`Given header version ${v} is not supported`);
}
}
export interface Exception {
runtimeException: true;
type: 'xsp';
msg?: string,
cause?: any;
}
export function makeBaseException(msg?: string, cause?: any): Exception {
return { runtimeException: true, type: 'xsp', msg, cause };
}
export type ExceptionFlag = 'inputParsing' | 'argsOutOfBounds' | 'unknownSeg' |
'concurrentIteration';
export function exception(
flag: ExceptionFlag, msg?: string, cause?: any
): Exception {
const e = makeBaseException(msg, cause);
e[flag] = true;
return e;
}
export function inputException(msg?: string, cause?: any): Exception {
return exception('inputParsing', msg, cause);
}
function readV1orV2Header(h: Uint8Array): SegsInfo {
if (!isV1andV2HeaderLength(h.length)) { throw inputException(
`Header content size ${h.length} doesn't correspond to version 1.`); }
// 1) check version byte
const formatVersion = h[0];
if ((formatVersion !== 1) && (formatVersion !== 2)) { throw inputException(
`Given header version is ${formatVersion} instead of 1 or 2`); }
let pos = 1;
// 3) segment size in 256 byte units
const segSize = loadUintFrom2Bytes(h, pos) << 8;
pos += 2;
// 4) segment chains
const segChains: SegsChainInfo[] = [];
while (pos < h.length) {
// 4.1) number of segments in the chain
const numOfSegs = loadUintFrom4Bytes(h, pos);
pos += 4;
// 4.2) last segment size
const lastSegSize = loadUintFrom3Bytes(h, pos);
pos += 3;
// 4.3) 1st segment nonce
const nonce = makeUint8ArrayCopy(h.subarray(pos, pos+NONCE_LENGTH));
pos += NONCE_LENGTH;
// distinguish between finite and endless segment chains
let chainInfo: SegsChainInfo;
if ((numOfSegs === MAX_SEG_INDEX) && (lastSegSize === segSize)) {
if (pos < h.length) { throw inputException(
`Invalid header: endless segment chain isn't the last.`); }
chainInfo = { isEndless: true, nonce };
} else {
chainInfo = { numOfSegs, lastSegSize, nonce };
}
if (numOfSegs > 0) {
segChains.push(chainInfo);
}
}
return { segChains, segSize, formatVersion };
}
function isV1andV2HeaderLength(len: number): boolean {
len -= (1 + 2);
if (len < 0) { return false; }
if ((len % 31) === 0) { return true; }
len -= 24;
if (len < 0) { return false; }
return ((len % 31) === 0);
}
export interface SegId {
chain: number;
seg: number;
}
interface ChainLocations {
chain: SegsChainInfo;
content: {
start: number;
end?: number;
};
packed: {
start: number;
end?: number;
};
}
export interface LocationInSegment extends SegId {
posInSeg: number;
}
export interface SegmentInfo extends SegId {
/**
* Offset of the packed segment in all of segment bytes.
*/
packedOfs: number;
/**
* Packed segment's length. If segment chain is endless, segment can be
* shorter.
*/
packedLen: number;
/**
* Offset of segment's content in all of content.
*/
contentOfs: number;
/**
* Length of content in this segment. If segment chain is endless, segment
* can be shorter.
*/
contentLen: number;
/**
* This flag's true value indicates that segment's chain is endless.
*/
endlessChain?: true;
}
export class Locations {
private locations: ChainLocations[] = [];
private variant = { num: 0 };
constructor(
private segs: SegsInfo
) {
this.update();
Object.seal(this);
}
update(): void {
this.locations = [];
let contentOffset = 0;
let offset = 0;
for (let chain of this.segs.segChains) {
let chainLocations: ChainLocations;
if (chain.isEndless) {
chainLocations = {
chain,
packed: {
start: offset,
},
content: {
start: contentOffset,
}
};
} else {
const contentLen = (chain.numOfSegs-1)*this.segs.segSize + chain.lastSegSize;
const packedSize = contentLen + chain.numOfSegs*POLY_LENGTH;
chainLocations = {
chain,
packed: {
start: offset,
end: offset + packedSize
},
content: {
start: contentOffset,
end: contentOffset + contentLen
}
};
offset = chainLocations.packed.end!;
contentOffset = chainLocations.content.end!;
}
this.locations.push(chainLocations);
}
this.variant.num += 1;
}
get defaultSegSize(): number {
return this.segs.segSize;
}
get totalSegsLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.packed.end;
}
get finitePartSegsLen(): number {
const totalLen = this.totalSegsLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.packed.end!;
}
get totalContentLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.content.end;
}
get finitePartContentLen(): number {
const totalLen = this.totalContentLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.content.end!;
}
locateContentOfs(contentPosition: number): LocationInSegment {
if (contentPosition < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.content.end === undefined) ? true : (l.content.end > contentPosition)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
contentPosition -= l.content.start;
const seg = Math.floor(contentPosition / this.segs.segSize);
const posInSeg = (contentPosition - seg*this.segs.segSize);
return { chain, seg, posInSeg };
}
locateSegsOfs(segsOfs: number): LocationInSegment {
if (segsOfs < 0) { throw exception('argsOutOfBounds',
"Given segment offset is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.packed.end === undefined) ? true : (l.packed.end > segsOfs)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
segsOfs -= l.packed.start;
const seg = Math.floor(segsOfs / (this.segs.segSize + POLY_LENGTH));
const posInSeg = (segsOfs - seg*(this.segs.segSize + POLY_LENGTH));
return { chain, seg, posInSeg };
}
getChainLocations(
indOrChain: number|SegsChainInfo
): ChainLocations|undefined {
if (typeof indOrChain === 'number') {
return this.locations[indOrChain];
} else {
return this.locations.find(l => (l.chain === indOrChain));
}
}
segmentInfo<T extends SegmentInfo>(
segId: SegId, infoExtender?: InfoExtender<T>
): T {
const l = this.locations[segId.chain];
if (!l) { throw exception('argsOutOfBounds',
`Chain ${segId.chain} is not found`); }
return segmentInfo(
segId.chain, segId.seg, l, this.segs.segSize, infoExtender);
}
segmentInfos<T extends SegmentInfo>(
fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
return segmentInfos(
this.locations, this.segs.segSize, this.variant, fstSeg, infoExtender);
}
segmentNonce(segId: SegId): Uint8Array {
const chain = this.segs.segChains[segId.chain];
if (!chain) { throw exception('unknownSeg'); }
if (chain.isEndless) {
if (segId.seg > MAX_SEG_INDEX) { throw exception('unknownSeg'); }
return calculateNonce(chain.nonce, segId.seg);
} else if (segId.seg < chain.numOfSegs) {
return calculateNonce(chain.nonce, segId.seg);
} else {
throw exception('unknownSeg');
}
}
}
Object.freeze(Locations.prototype);
Object.freeze(Locations);
export const MAX_SEG_INDEX = 0xffffffff;
export type InfoExtender<T extends SegmentInfo> =
(chain: SegsChainInfo, segInd: number, info: T) => T;
function segmentInfo<T extends SegmentInfo>(
chain: number, seg: number, l: ChainLocations, segSize: number,
infoExtender?: InfoExtender<T>
): T {
if (seg < 0) { throw exception('argsOutOfBounds',
`Invalid segment index ${seg}`); }
const contentOfs = l.content.start + seg*segSize;
const packedOfs = l.packed.start + seg*(segSize+POLY_LENGTH);
let s: SegmentInfo;
if (l.chain.isEndless) {
const contentLen = segSize;
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg,
endlessChain: true, packedOfs, packedLen, contentOfs, contentLen };
} else {
if (seg >= l.chain.numOfSegs) { throw exception('argsOutOfBounds',
`Segment ${seg} is not found`); }
const lastSeg = (seg === (l.chain.numOfSegs-1));
const contentLen = (lastSeg ? l.chain.lastSegSize : segSize);
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg, packedOfs, packedLen, contentOfs, contentLen };
}
return (infoExtender ? infoExtender(l.chain, seg, s as T): (s as T));
}
function* segmentInfos<T extends SegmentInfo>(
locations: ChainLocations[], segSize: number, variant: { num: number; }, fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
const initVariant = variant.num;
let fstChainInd = 0;
let fstSegInd = 0;
if (fstSeg) {
fstChainInd = fstSeg.chain;
fstSegInd = fstSeg.seg;
}
for (let chain=fstChainInd; chain<locations.length; chain+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
const l = locations[chain];
const segIndexLimit = (l.chain.isEndless ?
MAX_SEG_INDEX+1 : l.chain.numOfSegs);
for (let seg=fstSegInd; seg<segIndexLimit; seg+=1) {
if (initVariant !== variant.num)
|
yield segmentInfo(chain, seg, l, segSize, infoExtender);
}
fstSegInd = 0;
if (l.chain.isEndless) { throw new Error(
`Generator in endless chain is not supposed to be run till its done, and it has already run all allowed segment index values.`); }
}
}
Object.freeze(exports);
|
{ throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
|
conditional_block
|
xsp-info.ts
|
/*
Copyright(c) 2015 - 2021 3NSoft Inc.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* This file contains code for working with file headers and (un)packing
* file segments.
* Exported utility should be used inside xsp library.
*/
import { calculateNonce, POLY_LENGTH, NONCE_LENGTH } from '../utils/crypt-utils';
import { assert } from '../utils/assert';
import { makeUint8ArrayCopy } from '../utils/buffer-utils';
export interface SegsInfo {
/**
* Common segment size before encryption. Encrypted segment is poly-bytes
* longer.
* Last segments in segment chains may be smaller than this value.
*/
segSize: number;
/**
* Array with info objects about chains of segments with related nonces.
* This array shall have zero elements, if file is empty.
* If it is an endless file, then the last segment chain is endless.
*/
segChains: SegsChainInfo[];
formatVersion: number;
}
export interface AttrSegInfo {
nonce: Uint8Array;
size: number;
}
export interface FiniteSegsChainInfo {
nonce: Uint8Array;
numOfSegs: number;
lastSegSize: number;
isEndless?: undefined;
}
export interface EndlessSegsChainInfo {
nonce: Uint8Array;
isEndless: true;
}
export type SegsChainInfo = FiniteSegsChainInfo|EndlessSegsChainInfo;
export function headerContentFor(s: SegsInfo, pads: number): Uint8Array {
assert(Number.isInteger(pads) && (pads >= 0));
if ((s.formatVersion === 1)
|| (s.formatVersion === 2)) {
return assembleV1andV2HeaderContent(s, pads);
} else {
throw new Error(`Version ${s.formatVersion} is not known`);
}
}
const V_1_2_CHAIN_LEN_IN_H = 3 + 4 + NONCE_LENGTH;
function assembleV1andV2HeaderContent(s: SegsInfo, pads: number): Uint8Array {
const headerLen = 1 + 2 + V_1_2_CHAIN_LEN_IN_H*(s.segChains.length + pads);
const h = new Uint8Array(headerLen);
let pos = 0;
// 1) version byte
h[pos] = s.formatVersion;
pos += 1;
// 3) segment size in 256 byte units
storeUintIn2Bytes(h, pos, s.segSize >>> 8);
pos += 2;
// 4.1) pads: array h is already initialized to all zeros
pos += V_1_2_CHAIN_LEN_IN_H*pads;
// 4.2) segment chains
for (let i=0; i<s.segChains.length; i+=1) {
const chainInfo = s.segChains[i];
// 4.1) number of segments in the chain
const numOfSegs = (chainInfo.isEndless ?
MAX_SEG_INDEX : chainInfo.numOfSegs);
storeUintIn4Bytes(h, pos, numOfSegs);
pos += 4;
// 4.2) last segment size
const lastSegSize = (chainInfo.isEndless ?
s.segSize : chainInfo.lastSegSize);
storeUintIn3Bytes(h, pos, lastSegSize);
pos += 3;
// 4.3) 1st segment nonce
h.set(chainInfo.nonce, pos);
pos += chainInfo.nonce.length;
}
return h;
}
/**
* @param x
* @param i
* @return unsigned 16-bit integer (2 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom2Bytes(x: Uint8Array, i: number): number {
return (x[i] << 8) | x[i+1];
}
/**
* @param x
* @param i
* @param u is an unsigned 16-bit integer (2 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn2Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 8;
x[i+1] = u;
}
/**
* @param x
* @param i
* @return unsigned 24-bit integer (3 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom3Bytes(x: Uint8Array, i: number): number {
return (x[i] << 16) | (x[i+1] << 8) | x[i+2];
}
/**
* @param x
* @param i
* @param u is an unsigned 24-bit integer (3 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn3Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 16;
x[i+1] = u >>> 8;
x[i+2] = u;
}
/**
* @param x
* @param i
* @return unsigned 32-bit integer (4 bytes), stored big-endian way in x,
* starting at index i.
*/
export function loadUintFrom4Bytes(x: Uint8Array, i: number): number {
// Note that (x << 24) may produce negative number, probably due to
// treating intermediate integer as signed, and pulling sign to resulting
// float number. Hence, we need a bit different operation here.
return x[i]*0x1000000 + ((x[i+1] << 16) | (x[i+2] << 8) | x[i+3]);
}
/**
* @param x
* @param i
* @param u is an unsigned 32-bit integer (4 bytes) to be stored big-endian
* way in x, starting at index i.
*/
export function storeUintIn4Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 24;
x[i+1] = u >>> 16;
x[i+2] = u >>> 8;
x[i+3] = u;
}
export function readSegsInfoFromHeader(h: Uint8Array): SegsInfo {
if (h.length < 1) { throw inputException(``); }
const v = h[0];
if ((v === 1) || (v === 2)) {
return readV1orV2Header(h);
} else {
throw inputException(`Given header version ${v} is not supported`);
}
}
export interface Exception {
runtimeException: true;
type: 'xsp';
msg?: string,
cause?: any;
}
export function makeBaseException(msg?: string, cause?: any): Exception {
return { runtimeException: true, type: 'xsp', msg, cause };
}
export type ExceptionFlag = 'inputParsing' | 'argsOutOfBounds' | 'unknownSeg' |
'concurrentIteration';
export function exception(
flag: ExceptionFlag, msg?: string, cause?: any
): Exception {
const e = makeBaseException(msg, cause);
e[flag] = true;
return e;
}
export function inputException(msg?: string, cause?: any): Exception {
return exception('inputParsing', msg, cause);
}
function readV1orV2Header(h: Uint8Array): SegsInfo {
if (!isV1andV2HeaderLength(h.length)) { throw inputException(
`Header content size ${h.length} doesn't correspond to version 1.`); }
// 1) check version byte
const formatVersion = h[0];
if ((formatVersion !== 1) && (formatVersion !== 2)) { throw inputException(
`Given header version is ${formatVersion} instead of 1 or 2`); }
let pos = 1;
// 3) segment size in 256 byte units
const segSize = loadUintFrom2Bytes(h, pos) << 8;
pos += 2;
// 4) segment chains
const segChains: SegsChainInfo[] = [];
while (pos < h.length) {
// 4.1) number of segments in the chain
const numOfSegs = loadUintFrom4Bytes(h, pos);
pos += 4;
// 4.2) last segment size
const lastSegSize = loadUintFrom3Bytes(h, pos);
pos += 3;
// 4.3) 1st segment nonce
const nonce = makeUint8ArrayCopy(h.subarray(pos, pos+NONCE_LENGTH));
pos += NONCE_LENGTH;
// distinguish between finite and endless segment chains
let chainInfo: SegsChainInfo;
if ((numOfSegs === MAX_SEG_INDEX) && (lastSegSize === segSize)) {
if (pos < h.length) { throw inputException(
`Invalid header: endless segment chain isn't the last.`); }
chainInfo = { isEndless: true, nonce };
} else {
chainInfo = { numOfSegs, lastSegSize, nonce };
}
if (numOfSegs > 0) {
segChains.push(chainInfo);
}
}
return { segChains, segSize, formatVersion };
}
function isV1andV2HeaderLength(len: number): boolean {
len -= (1 + 2);
if (len < 0) { return false; }
if ((len % 31) === 0) { return true; }
len -= 24;
if (len < 0) { return false; }
return ((len % 31) === 0);
}
export interface SegId {
chain: number;
seg: number;
}
interface ChainLocations {
chain: SegsChainInfo;
content: {
start: number;
end?: number;
};
packed: {
start: number;
end?: number;
};
}
export interface LocationInSegment extends SegId {
posInSeg: number;
}
export interface SegmentInfo extends SegId {
/**
* Offset of the packed segment in all of segment bytes.
*/
packedOfs: number;
/**
* Packed segment's length. If segment chain is endless, segment can be
* shorter.
*/
packedLen: number;
/**
* Offset of segment's content in all of content.
*/
contentOfs: number;
/**
* Length of content in this segment. If segment chain is endless, segment
* can be shorter.
*/
contentLen: number;
/**
* This flag's true value indicates that segment's chain is endless.
*/
endlessChain?: true;
}
export class Locations {
private locations: ChainLocations[] = [];
private variant = { num: 0 };
constructor(
private segs: SegsInfo
) {
this.update();
Object.seal(this);
}
update(): void {
this.locations = [];
let contentOffset = 0;
let offset = 0;
for (let chain of this.segs.segChains) {
let chainLocations: ChainLocations;
if (chain.isEndless) {
chainLocations = {
chain,
packed: {
start: offset,
},
content: {
start: contentOffset,
}
};
} else {
const contentLen = (chain.numOfSegs-1)*this.segs.segSize + chain.lastSegSize;
const packedSize = contentLen + chain.numOfSegs*POLY_LENGTH;
chainLocations = {
chain,
packed: {
start: offset,
end: offset + packedSize
},
content: {
start: contentOffset,
end: contentOffset + contentLen
}
};
offset = chainLocations.packed.end!;
contentOffset = chainLocations.content.end!;
}
this.locations.push(chainLocations);
}
this.variant.num += 1;
}
get defaultSegSize(): number
|
get totalSegsLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.packed.end;
}
get finitePartSegsLen(): number {
const totalLen = this.totalSegsLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.packed.end!;
}
get totalContentLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.content.end;
}
get finitePartContentLen(): number {
const totalLen = this.totalContentLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.content.end!;
}
locateContentOfs(contentPosition: number): LocationInSegment {
if (contentPosition < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.content.end === undefined) ? true : (l.content.end > contentPosition)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
contentPosition -= l.content.start;
const seg = Math.floor(contentPosition / this.segs.segSize);
const posInSeg = (contentPosition - seg*this.segs.segSize);
return { chain, seg, posInSeg };
}
locateSegsOfs(segsOfs: number): LocationInSegment {
if (segsOfs < 0) { throw exception('argsOutOfBounds',
"Given segment offset is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.packed.end === undefined) ? true : (l.packed.end > segsOfs)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
segsOfs -= l.packed.start;
const seg = Math.floor(segsOfs / (this.segs.segSize + POLY_LENGTH));
const posInSeg = (segsOfs - seg*(this.segs.segSize + POLY_LENGTH));
return { chain, seg, posInSeg };
}
getChainLocations(
indOrChain: number|SegsChainInfo
): ChainLocations|undefined {
if (typeof indOrChain === 'number') {
return this.locations[indOrChain];
} else {
return this.locations.find(l => (l.chain === indOrChain));
}
}
segmentInfo<T extends SegmentInfo>(
segId: SegId, infoExtender?: InfoExtender<T>
): T {
const l = this.locations[segId.chain];
if (!l) { throw exception('argsOutOfBounds',
`Chain ${segId.chain} is not found`); }
return segmentInfo(
segId.chain, segId.seg, l, this.segs.segSize, infoExtender);
}
segmentInfos<T extends SegmentInfo>(
fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
return segmentInfos(
this.locations, this.segs.segSize, this.variant, fstSeg, infoExtender);
}
segmentNonce(segId: SegId): Uint8Array {
const chain = this.segs.segChains[segId.chain];
if (!chain) { throw exception('unknownSeg'); }
if (chain.isEndless) {
if (segId.seg > MAX_SEG_INDEX) { throw exception('unknownSeg'); }
return calculateNonce(chain.nonce, segId.seg);
} else if (segId.seg < chain.numOfSegs) {
return calculateNonce(chain.nonce, segId.seg);
} else {
throw exception('unknownSeg');
}
}
}
Object.freeze(Locations.prototype);
Object.freeze(Locations);
export const MAX_SEG_INDEX = 0xffffffff;
export type InfoExtender<T extends SegmentInfo> =
(chain: SegsChainInfo, segInd: number, info: T) => T;
function segmentInfo<T extends SegmentInfo>(
chain: number, seg: number, l: ChainLocations, segSize: number,
infoExtender?: InfoExtender<T>
): T {
if (seg < 0) { throw exception('argsOutOfBounds',
`Invalid segment index ${seg}`); }
const contentOfs = l.content.start + seg*segSize;
const packedOfs = l.packed.start + seg*(segSize+POLY_LENGTH);
let s: SegmentInfo;
if (l.chain.isEndless) {
const contentLen = segSize;
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg,
endlessChain: true, packedOfs, packedLen, contentOfs, contentLen };
} else {
if (seg >= l.chain.numOfSegs) { throw exception('argsOutOfBounds',
`Segment ${seg} is not found`); }
const lastSeg = (seg === (l.chain.numOfSegs-1));
const contentLen = (lastSeg ? l.chain.lastSegSize : segSize);
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg, packedOfs, packedLen, contentOfs, contentLen };
}
return (infoExtender ? infoExtender(l.chain, seg, s as T): (s as T));
}
function* segmentInfos<T extends SegmentInfo>(
locations: ChainLocations[], segSize: number, variant: { num: number; }, fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
const initVariant = variant.num;
let fstChainInd = 0;
let fstSegInd = 0;
if (fstSeg) {
fstChainInd = fstSeg.chain;
fstSegInd = fstSeg.seg;
}
for (let chain=fstChainInd; chain<locations.length; chain+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
const l = locations[chain];
const segIndexLimit = (l.chain.isEndless ?
MAX_SEG_INDEX+1 : l.chain.numOfSegs);
for (let seg=fstSegInd; seg<segIndexLimit; seg+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
yield segmentInfo(chain, seg, l, segSize, infoExtender);
}
fstSegInd = 0;
if (l.chain.isEndless) { throw new Error(
`Generator in endless chain is not supposed to be run till its done, and it has already run all allowed segment index values.`); }
}
}
Object.freeze(exports);
|
{
return this.segs.segSize;
}
|
identifier_body
|
xsp-info.ts
|
/*
Copyright(c) 2015 - 2021 3NSoft Inc.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* This file contains code for working with file headers and (un)packing
* file segments.
* Exported utility should be used inside xsp library.
*/
import { calculateNonce, POLY_LENGTH, NONCE_LENGTH } from '../utils/crypt-utils';
import { assert } from '../utils/assert';
import { makeUint8ArrayCopy } from '../utils/buffer-utils';
export interface SegsInfo {
/**
* Common segment size before encryption. Encrypted segment is poly-bytes
* longer.
* Last segments in segment chains may be smaller than this value.
*/
segSize: number;
/**
* Array with info objects about chains of segments with related nonces.
* This array shall have zero elements, if file is empty.
* If it is an endless file, then the last segment chain is endless.
*/
segChains: SegsChainInfo[];
formatVersion: number;
}
export interface AttrSegInfo {
nonce: Uint8Array;
size: number;
}
export interface FiniteSegsChainInfo {
nonce: Uint8Array;
numOfSegs: number;
lastSegSize: number;
isEndless?: undefined;
}
export interface EndlessSegsChainInfo {
nonce: Uint8Array;
isEndless: true;
}
export type SegsChainInfo = FiniteSegsChainInfo|EndlessSegsChainInfo;
export function headerContentFor(s: SegsInfo, pads: number): Uint8Array {
assert(Number.isInteger(pads) && (pads >= 0));
if ((s.formatVersion === 1)
|| (s.formatVersion === 2)) {
return assembleV1andV2HeaderContent(s, pads);
} else {
throw new Error(`Version ${s.formatVersion} is not known`);
}
}
const V_1_2_CHAIN_LEN_IN_H = 3 + 4 + NONCE_LENGTH;
function assembleV1andV2HeaderContent(s: SegsInfo, pads: number): Uint8Array {
const headerLen = 1 + 2 + V_1_2_CHAIN_LEN_IN_H*(s.segChains.length + pads);
const h = new Uint8Array(headerLen);
let pos = 0;
// 1) version byte
h[pos] = s.formatVersion;
pos += 1;
// 3) segment size in 256 byte units
storeUintIn2Bytes(h, pos, s.segSize >>> 8);
pos += 2;
// 4.1) pads: array h is already initialized to all zeros
pos += V_1_2_CHAIN_LEN_IN_H*pads;
// 4.2) segment chains
for (let i=0; i<s.segChains.length; i+=1) {
const chainInfo = s.segChains[i];
// 4.1) number of segments in the chain
const numOfSegs = (chainInfo.isEndless ?
MAX_SEG_INDEX : chainInfo.numOfSegs);
storeUintIn4Bytes(h, pos, numOfSegs);
pos += 4;
// 4.2) last segment size
const lastSegSize = (chainInfo.isEndless ?
s.segSize : chainInfo.lastSegSize);
storeUintIn3Bytes(h, pos, lastSegSize);
pos += 3;
// 4.3) 1st segment nonce
h.set(chainInfo.nonce, pos);
pos += chainInfo.nonce.length;
}
return h;
}
/**
* @param x
* @param i
* @return unsigned 16-bit integer (2 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom2Bytes(x: Uint8Array, i: number): number {
return (x[i] << 8) | x[i+1];
}
/**
* @param x
* @param i
* @param u is an unsigned 16-bit integer (2 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn2Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 8;
x[i+1] = u;
}
/**
* @param x
* @param i
* @return unsigned 24-bit integer (3 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom3Bytes(x: Uint8Array, i: number): number {
return (x[i] << 16) | (x[i+1] << 8) | x[i+2];
}
/**
* @param x
* @param i
* @param u is an unsigned 24-bit integer (3 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn3Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 16;
x[i+1] = u >>> 8;
x[i+2] = u;
}
/**
* @param x
* @param i
* @return unsigned 32-bit integer (4 bytes), stored big-endian way in x,
* starting at index i.
*/
export function
|
(x: Uint8Array, i: number): number {
// Note that (x << 24) may produce negative number, probably due to
// treating intermediate integer as signed, and pulling sign to resulting
// float number. Hence, we need a bit different operation here.
return x[i]*0x1000000 + ((x[i+1] << 16) | (x[i+2] << 8) | x[i+3]);
}
/**
* @param x
* @param i
* @param u is an unsigned 32-bit integer (4 bytes) to be stored big-endian
* way in x, starting at index i.
*/
export function storeUintIn4Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 24;
x[i+1] = u >>> 16;
x[i+2] = u >>> 8;
x[i+3] = u;
}
export function readSegsInfoFromHeader(h: Uint8Array): SegsInfo {
if (h.length < 1) { throw inputException(``); }
const v = h[0];
if ((v === 1) || (v === 2)) {
return readV1orV2Header(h);
} else {
throw inputException(`Given header version ${v} is not supported`);
}
}
export interface Exception {
runtimeException: true;
type: 'xsp';
msg?: string,
cause?: any;
}
export function makeBaseException(msg?: string, cause?: any): Exception {
return { runtimeException: true, type: 'xsp', msg, cause };
}
export type ExceptionFlag = 'inputParsing' | 'argsOutOfBounds' | 'unknownSeg' |
'concurrentIteration';
export function exception(
flag: ExceptionFlag, msg?: string, cause?: any
): Exception {
const e = makeBaseException(msg, cause);
e[flag] = true;
return e;
}
export function inputException(msg?: string, cause?: any): Exception {
return exception('inputParsing', msg, cause);
}
function readV1orV2Header(h: Uint8Array): SegsInfo {
if (!isV1andV2HeaderLength(h.length)) { throw inputException(
`Header content size ${h.length} doesn't correspond to version 1.`); }
// 1) check version byte
const formatVersion = h[0];
if ((formatVersion !== 1) && (formatVersion !== 2)) { throw inputException(
`Given header version is ${formatVersion} instead of 1 or 2`); }
let pos = 1;
// 3) segment size in 256 byte units
const segSize = loadUintFrom2Bytes(h, pos) << 8;
pos += 2;
// 4) segment chains
const segChains: SegsChainInfo[] = [];
while (pos < h.length) {
// 4.1) number of segments in the chain
const numOfSegs = loadUintFrom4Bytes(h, pos);
pos += 4;
// 4.2) last segment size
const lastSegSize = loadUintFrom3Bytes(h, pos);
pos += 3;
// 4.3) 1st segment nonce
const nonce = makeUint8ArrayCopy(h.subarray(pos, pos+NONCE_LENGTH));
pos += NONCE_LENGTH;
// distinguish between finite and endless segment chains
let chainInfo: SegsChainInfo;
if ((numOfSegs === MAX_SEG_INDEX) && (lastSegSize === segSize)) {
if (pos < h.length) { throw inputException(
`Invalid header: endless segment chain isn't the last.`); }
chainInfo = { isEndless: true, nonce };
} else {
chainInfo = { numOfSegs, lastSegSize, nonce };
}
if (numOfSegs > 0) {
segChains.push(chainInfo);
}
}
return { segChains, segSize, formatVersion };
}
function isV1andV2HeaderLength(len: number): boolean {
len -= (1 + 2);
if (len < 0) { return false; }
if ((len % 31) === 0) { return true; }
len -= 24;
if (len < 0) { return false; }
return ((len % 31) === 0);
}
export interface SegId {
chain: number;
seg: number;
}
interface ChainLocations {
chain: SegsChainInfo;
content: {
start: number;
end?: number;
};
packed: {
start: number;
end?: number;
};
}
export interface LocationInSegment extends SegId {
posInSeg: number;
}
export interface SegmentInfo extends SegId {
/**
* Offset of the packed segment in all of segment bytes.
*/
packedOfs: number;
/**
* Packed segment's length. If segment chain is endless, segment can be
* shorter.
*/
packedLen: number;
/**
* Offset of segment's content in all of content.
*/
contentOfs: number;
/**
* Length of content in this segment. If segment chain is endless, segment
* can be shorter.
*/
contentLen: number;
/**
* This flag's true value indicates that segment's chain is endless.
*/
endlessChain?: true;
}
export class Locations {
private locations: ChainLocations[] = [];
private variant = { num: 0 };
constructor(
private segs: SegsInfo
) {
this.update();
Object.seal(this);
}
update(): void {
this.locations = [];
let contentOffset = 0;
let offset = 0;
for (let chain of this.segs.segChains) {
let chainLocations: ChainLocations;
if (chain.isEndless) {
chainLocations = {
chain,
packed: {
start: offset,
},
content: {
start: contentOffset,
}
};
} else {
const contentLen = (chain.numOfSegs-1)*this.segs.segSize + chain.lastSegSize;
const packedSize = contentLen + chain.numOfSegs*POLY_LENGTH;
chainLocations = {
chain,
packed: {
start: offset,
end: offset + packedSize
},
content: {
start: contentOffset,
end: contentOffset + contentLen
}
};
offset = chainLocations.packed.end!;
contentOffset = chainLocations.content.end!;
}
this.locations.push(chainLocations);
}
this.variant.num += 1;
}
get defaultSegSize(): number {
return this.segs.segSize;
}
get totalSegsLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.packed.end;
}
get finitePartSegsLen(): number {
const totalLen = this.totalSegsLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.packed.end!;
}
get totalContentLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.content.end;
}
get finitePartContentLen(): number {
const totalLen = this.totalContentLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.content.end!;
}
locateContentOfs(contentPosition: number): LocationInSegment {
if (contentPosition < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.content.end === undefined) ? true : (l.content.end > contentPosition)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
contentPosition -= l.content.start;
const seg = Math.floor(contentPosition / this.segs.segSize);
const posInSeg = (contentPosition - seg*this.segs.segSize);
return { chain, seg, posInSeg };
}
locateSegsOfs(segsOfs: number): LocationInSegment {
if (segsOfs < 0) { throw exception('argsOutOfBounds',
"Given segment offset is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.packed.end === undefined) ? true : (l.packed.end > segsOfs)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
segsOfs -= l.packed.start;
const seg = Math.floor(segsOfs / (this.segs.segSize + POLY_LENGTH));
const posInSeg = (segsOfs - seg*(this.segs.segSize + POLY_LENGTH));
return { chain, seg, posInSeg };
}
getChainLocations(
indOrChain: number|SegsChainInfo
): ChainLocations|undefined {
if (typeof indOrChain === 'number') {
return this.locations[indOrChain];
} else {
return this.locations.find(l => (l.chain === indOrChain));
}
}
segmentInfo<T extends SegmentInfo>(
segId: SegId, infoExtender?: InfoExtender<T>
): T {
const l = this.locations[segId.chain];
if (!l) { throw exception('argsOutOfBounds',
`Chain ${segId.chain} is not found`); }
return segmentInfo(
segId.chain, segId.seg, l, this.segs.segSize, infoExtender);
}
segmentInfos<T extends SegmentInfo>(
fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
return segmentInfos(
this.locations, this.segs.segSize, this.variant, fstSeg, infoExtender);
}
segmentNonce(segId: SegId): Uint8Array {
const chain = this.segs.segChains[segId.chain];
if (!chain) { throw exception('unknownSeg'); }
if (chain.isEndless) {
if (segId.seg > MAX_SEG_INDEX) { throw exception('unknownSeg'); }
return calculateNonce(chain.nonce, segId.seg);
} else if (segId.seg < chain.numOfSegs) {
return calculateNonce(chain.nonce, segId.seg);
} else {
throw exception('unknownSeg');
}
}
}
Object.freeze(Locations.prototype);
Object.freeze(Locations);
export const MAX_SEG_INDEX = 0xffffffff;
export type InfoExtender<T extends SegmentInfo> =
(chain: SegsChainInfo, segInd: number, info: T) => T;
function segmentInfo<T extends SegmentInfo>(
chain: number, seg: number, l: ChainLocations, segSize: number,
infoExtender?: InfoExtender<T>
): T {
if (seg < 0) { throw exception('argsOutOfBounds',
`Invalid segment index ${seg}`); }
const contentOfs = l.content.start + seg*segSize;
const packedOfs = l.packed.start + seg*(segSize+POLY_LENGTH);
let s: SegmentInfo;
if (l.chain.isEndless) {
const contentLen = segSize;
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg,
endlessChain: true, packedOfs, packedLen, contentOfs, contentLen };
} else {
if (seg >= l.chain.numOfSegs) { throw exception('argsOutOfBounds',
`Segment ${seg} is not found`); }
const lastSeg = (seg === (l.chain.numOfSegs-1));
const contentLen = (lastSeg ? l.chain.lastSegSize : segSize);
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg, packedOfs, packedLen, contentOfs, contentLen };
}
return (infoExtender ? infoExtender(l.chain, seg, s as T): (s as T));
}
function* segmentInfos<T extends SegmentInfo>(
locations: ChainLocations[], segSize: number, variant: { num: number; }, fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
const initVariant = variant.num;
let fstChainInd = 0;
let fstSegInd = 0;
if (fstSeg) {
fstChainInd = fstSeg.chain;
fstSegInd = fstSeg.seg;
}
for (let chain=fstChainInd; chain<locations.length; chain+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
const l = locations[chain];
const segIndexLimit = (l.chain.isEndless ?
MAX_SEG_INDEX+1 : l.chain.numOfSegs);
for (let seg=fstSegInd; seg<segIndexLimit; seg+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
yield segmentInfo(chain, seg, l, segSize, infoExtender);
}
fstSegInd = 0;
if (l.chain.isEndless) { throw new Error(
`Generator in endless chain is not supposed to be run till its done, and it has already run all allowed segment index values.`); }
}
}
Object.freeze(exports);
|
loadUintFrom4Bytes
|
identifier_name
|
xsp-info.ts
|
/*
Copyright(c) 2015 - 2021 3NSoft Inc.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* This file contains code for working with file headers and (un)packing
* file segments.
* Exported utility should be used inside xsp library.
*/
import { calculateNonce, POLY_LENGTH, NONCE_LENGTH } from '../utils/crypt-utils';
import { assert } from '../utils/assert';
import { makeUint8ArrayCopy } from '../utils/buffer-utils';
export interface SegsInfo {
/**
* Common segment size before encryption. Encrypted segment is poly-bytes
* longer.
* Last segments in segment chains may be smaller than this value.
*/
segSize: number;
/**
* Array with info objects about chains of segments with related nonces.
* This array shall have zero elements, if file is empty.
* If it is an endless file, then the last segment chain is endless.
*/
segChains: SegsChainInfo[];
formatVersion: number;
}
export interface AttrSegInfo {
nonce: Uint8Array;
size: number;
}
export interface FiniteSegsChainInfo {
nonce: Uint8Array;
numOfSegs: number;
lastSegSize: number;
isEndless?: undefined;
}
export interface EndlessSegsChainInfo {
nonce: Uint8Array;
isEndless: true;
}
export type SegsChainInfo = FiniteSegsChainInfo|EndlessSegsChainInfo;
export function headerContentFor(s: SegsInfo, pads: number): Uint8Array {
assert(Number.isInteger(pads) && (pads >= 0));
if ((s.formatVersion === 1)
|| (s.formatVersion === 2)) {
return assembleV1andV2HeaderContent(s, pads);
} else {
throw new Error(`Version ${s.formatVersion} is not known`);
}
}
const V_1_2_CHAIN_LEN_IN_H = 3 + 4 + NONCE_LENGTH;
function assembleV1andV2HeaderContent(s: SegsInfo, pads: number): Uint8Array {
const headerLen = 1 + 2 + V_1_2_CHAIN_LEN_IN_H*(s.segChains.length + pads);
const h = new Uint8Array(headerLen);
let pos = 0;
// 1) version byte
h[pos] = s.formatVersion;
pos += 1;
// 3) segment size in 256 byte units
storeUintIn2Bytes(h, pos, s.segSize >>> 8);
pos += 2;
// 4.1) pads: array h is already initialized to all zeros
pos += V_1_2_CHAIN_LEN_IN_H*pads;
// 4.2) segment chains
for (let i=0; i<s.segChains.length; i+=1) {
const chainInfo = s.segChains[i];
// 4.1) number of segments in the chain
const numOfSegs = (chainInfo.isEndless ?
MAX_SEG_INDEX : chainInfo.numOfSegs);
storeUintIn4Bytes(h, pos, numOfSegs);
pos += 4;
// 4.2) last segment size
const lastSegSize = (chainInfo.isEndless ?
s.segSize : chainInfo.lastSegSize);
storeUintIn3Bytes(h, pos, lastSegSize);
pos += 3;
// 4.3) 1st segment nonce
h.set(chainInfo.nonce, pos);
pos += chainInfo.nonce.length;
}
return h;
}
/**
* @param x
* @param i
* @return unsigned 16-bit integer (2 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom2Bytes(x: Uint8Array, i: number): number {
return (x[i] << 8) | x[i+1];
}
/**
* @param x
* @param i
* @param u is an unsigned 16-bit integer (2 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn2Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 8;
x[i+1] = u;
}
/**
* @param x
* @param i
* @return unsigned 24-bit integer (3 bytes), stored big-endian way in x,
* starting at index i.
*/
function loadUintFrom3Bytes(x: Uint8Array, i: number): number {
return (x[i] << 16) | (x[i+1] << 8) | x[i+2];
}
/**
* @param x
* @param i
* @param u is an unsigned 24-bit integer (3 bytes) to be stored big-endian
* way in x, starting at index i.
*/
function storeUintIn3Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 16;
x[i+1] = u >>> 8;
x[i+2] = u;
}
/**
* @param x
* @param i
* @return unsigned 32-bit integer (4 bytes), stored big-endian way in x,
* starting at index i.
*/
export function loadUintFrom4Bytes(x: Uint8Array, i: number): number {
// Note that (x << 24) may produce negative number, probably due to
// treating intermediate integer as signed, and pulling sign to resulting
// float number. Hence, we need a bit different operation here.
return x[i]*0x1000000 + ((x[i+1] << 16) | (x[i+2] << 8) | x[i+3]);
}
/**
* @param x
* @param i
* @param u is an unsigned 32-bit integer (4 bytes) to be stored big-endian
* way in x, starting at index i.
*/
export function storeUintIn4Bytes(x: Uint8Array, i: number, u: number): void {
x[i] = u >>> 24;
x[i+1] = u >>> 16;
x[i+2] = u >>> 8;
x[i+3] = u;
}
export function readSegsInfoFromHeader(h: Uint8Array): SegsInfo {
if (h.length < 1) { throw inputException(``); }
const v = h[0];
if ((v === 1) || (v === 2)) {
return readV1orV2Header(h);
} else {
throw inputException(`Given header version ${v} is not supported`);
}
}
export interface Exception {
runtimeException: true;
type: 'xsp';
msg?: string,
cause?: any;
}
export function makeBaseException(msg?: string, cause?: any): Exception {
return { runtimeException: true, type: 'xsp', msg, cause };
}
export type ExceptionFlag = 'inputParsing' | 'argsOutOfBounds' | 'unknownSeg' |
'concurrentIteration';
export function exception(
flag: ExceptionFlag, msg?: string, cause?: any
): Exception {
const e = makeBaseException(msg, cause);
e[flag] = true;
return e;
}
export function inputException(msg?: string, cause?: any): Exception {
return exception('inputParsing', msg, cause);
}
function readV1orV2Header(h: Uint8Array): SegsInfo {
if (!isV1andV2HeaderLength(h.length)) { throw inputException(
`Header content size ${h.length} doesn't correspond to version 1.`); }
// 1) check version byte
const formatVersion = h[0];
if ((formatVersion !== 1) && (formatVersion !== 2)) { throw inputException(
`Given header version is ${formatVersion} instead of 1 or 2`); }
let pos = 1;
// 3) segment size in 256 byte units
const segSize = loadUintFrom2Bytes(h, pos) << 8;
pos += 2;
// 4) segment chains
const segChains: SegsChainInfo[] = [];
while (pos < h.length) {
// 4.1) number of segments in the chain
const numOfSegs = loadUintFrom4Bytes(h, pos);
pos += 4;
// 4.2) last segment size
const lastSegSize = loadUintFrom3Bytes(h, pos);
pos += 3;
// 4.3) 1st segment nonce
const nonce = makeUint8ArrayCopy(h.subarray(pos, pos+NONCE_LENGTH));
pos += NONCE_LENGTH;
// distinguish between finite and endless segment chains
let chainInfo: SegsChainInfo;
if ((numOfSegs === MAX_SEG_INDEX) && (lastSegSize === segSize)) {
if (pos < h.length) { throw inputException(
`Invalid header: endless segment chain isn't the last.`); }
chainInfo = { isEndless: true, nonce };
} else {
chainInfo = { numOfSegs, lastSegSize, nonce };
}
if (numOfSegs > 0) {
segChains.push(chainInfo);
}
}
return { segChains, segSize, formatVersion };
}
function isV1andV2HeaderLength(len: number): boolean {
len -= (1 + 2);
if (len < 0) { return false; }
if ((len % 31) === 0) { return true; }
len -= 24;
if (len < 0) { return false; }
return ((len % 31) === 0);
}
export interface SegId {
chain: number;
seg: number;
}
interface ChainLocations {
chain: SegsChainInfo;
content: {
start: number;
end?: number;
};
packed: {
start: number;
end?: number;
};
}
export interface LocationInSegment extends SegId {
posInSeg: number;
}
export interface SegmentInfo extends SegId {
/**
* Offset of the packed segment in all of segment bytes.
*/
packedOfs: number;
/**
* Packed segment's length. If segment chain is endless, segment can be
* shorter.
*/
packedLen: number;
/**
* Offset of segment's content in all of content.
*/
contentOfs: number;
/**
* Length of content in this segment. If segment chain is endless, segment
* can be shorter.
*/
contentLen: number;
/**
* This flag's true value indicates that segment's chain is endless.
*/
endlessChain?: true;
}
export class Locations {
private locations: ChainLocations[] = [];
private variant = { num: 0 };
constructor(
private segs: SegsInfo
) {
this.update();
Object.seal(this);
}
update(): void {
this.locations = [];
let contentOffset = 0;
let offset = 0;
for (let chain of this.segs.segChains) {
let chainLocations: ChainLocations;
if (chain.isEndless) {
chainLocations = {
chain,
packed: {
start: offset,
},
content: {
start: contentOffset,
}
};
} else {
const contentLen = (chain.numOfSegs-1)*this.segs.segSize + chain.lastSegSize;
const packedSize = contentLen + chain.numOfSegs*POLY_LENGTH;
chainLocations = {
chain,
packed: {
start: offset,
end: offset + packedSize
},
content: {
start: contentOffset,
end: contentOffset + contentLen
}
};
offset = chainLocations.packed.end!;
contentOffset = chainLocations.content.end!;
}
this.locations.push(chainLocations);
}
this.variant.num += 1;
}
get defaultSegSize(): number {
return this.segs.segSize;
}
get totalSegsLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.packed.end;
}
get finitePartSegsLen(): number {
const totalLen = this.totalSegsLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.packed.end!;
}
get totalContentLen(): number|undefined {
if (this.locations.length === 0) { return 0; }
const lastChain = this.locations[this.locations.length-1];
return lastChain.content.end;
}
get finitePartContentLen(): number {
const totalLen = this.totalContentLen;
if (typeof totalLen === 'number') { return totalLen; }
if (this.locations.length < 2) { return 0; }
const l = this.locations[this.locations.length-2];
assert(!l.chain.isEndless);
return l.content.end!;
}
locateContentOfs(contentPosition: number): LocationInSegment {
if (contentPosition < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.content.end === undefined) ? true : (l.content.end > contentPosition)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
contentPosition -= l.content.start;
const seg = Math.floor(contentPosition / this.segs.segSize);
const posInSeg = (contentPosition - seg*this.segs.segSize);
return { chain, seg, posInSeg };
}
locateSegsOfs(segsOfs: number): LocationInSegment {
if (segsOfs < 0) { throw exception('argsOutOfBounds',
"Given segment offset is out of bounds."); }
const chain = this.locations.findIndex(l => ((l.packed.end === undefined) ? true : (l.packed.end > segsOfs)));
if (chain < 0) { throw exception('argsOutOfBounds',
"Given position is out of bounds."); }
const l = this.locations[chain];
segsOfs -= l.packed.start;
const seg = Math.floor(segsOfs / (this.segs.segSize + POLY_LENGTH));
const posInSeg = (segsOfs - seg*(this.segs.segSize + POLY_LENGTH));
return { chain, seg, posInSeg };
|
indOrChain: number|SegsChainInfo
): ChainLocations|undefined {
if (typeof indOrChain === 'number') {
return this.locations[indOrChain];
} else {
return this.locations.find(l => (l.chain === indOrChain));
}
}
segmentInfo<T extends SegmentInfo>(
segId: SegId, infoExtender?: InfoExtender<T>
): T {
const l = this.locations[segId.chain];
if (!l) { throw exception('argsOutOfBounds',
`Chain ${segId.chain} is not found`); }
return segmentInfo(
segId.chain, segId.seg, l, this.segs.segSize, infoExtender);
}
segmentInfos<T extends SegmentInfo>(
fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
return segmentInfos(
this.locations, this.segs.segSize, this.variant, fstSeg, infoExtender);
}
segmentNonce(segId: SegId): Uint8Array {
const chain = this.segs.segChains[segId.chain];
if (!chain) { throw exception('unknownSeg'); }
if (chain.isEndless) {
if (segId.seg > MAX_SEG_INDEX) { throw exception('unknownSeg'); }
return calculateNonce(chain.nonce, segId.seg);
} else if (segId.seg < chain.numOfSegs) {
return calculateNonce(chain.nonce, segId.seg);
} else {
throw exception('unknownSeg');
}
}
}
Object.freeze(Locations.prototype);
Object.freeze(Locations);
export const MAX_SEG_INDEX = 0xffffffff;
export type InfoExtender<T extends SegmentInfo> =
(chain: SegsChainInfo, segInd: number, info: T) => T;
function segmentInfo<T extends SegmentInfo>(
chain: number, seg: number, l: ChainLocations, segSize: number,
infoExtender?: InfoExtender<T>
): T {
if (seg < 0) { throw exception('argsOutOfBounds',
`Invalid segment index ${seg}`); }
const contentOfs = l.content.start + seg*segSize;
const packedOfs = l.packed.start + seg*(segSize+POLY_LENGTH);
let s: SegmentInfo;
if (l.chain.isEndless) {
const contentLen = segSize;
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg,
endlessChain: true, packedOfs, packedLen, contentOfs, contentLen };
} else {
if (seg >= l.chain.numOfSegs) { throw exception('argsOutOfBounds',
`Segment ${seg} is not found`); }
const lastSeg = (seg === (l.chain.numOfSegs-1));
const contentLen = (lastSeg ? l.chain.lastSegSize : segSize);
const packedLen = contentLen + POLY_LENGTH;
s = { chain, seg, packedOfs, packedLen, contentOfs, contentLen };
}
return (infoExtender ? infoExtender(l.chain, seg, s as T): (s as T));
}
function* segmentInfos<T extends SegmentInfo>(
locations: ChainLocations[], segSize: number, variant: { num: number; }, fstSeg?: SegId, infoExtender?: InfoExtender<T>
): IterableIterator<T> {
const initVariant = variant.num;
let fstChainInd = 0;
let fstSegInd = 0;
if (fstSeg) {
fstChainInd = fstSeg.chain;
fstSegInd = fstSeg.seg;
}
for (let chain=fstChainInd; chain<locations.length; chain+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
const l = locations[chain];
const segIndexLimit = (l.chain.isEndless ?
MAX_SEG_INDEX+1 : l.chain.numOfSegs);
for (let seg=fstSegInd; seg<segIndexLimit; seg+=1) {
if (initVariant !== variant.num) { throw exception(
'concurrentIteration',
`Can't iterate cause underlying index has changed.`); }
yield segmentInfo(chain, seg, l, segSize, infoExtender);
}
fstSegInd = 0;
if (l.chain.isEndless) { throw new Error(
`Generator in endless chain is not supposed to be run till its done, and it has already run all allowed segment index values.`); }
}
}
Object.freeze(exports);
|
}
getChainLocations(
|
random_line_split
|
Observable.test.ts
|
import {TeardownLogic, Subscription, SubscriptionHandler, Observable, PartialObserver, Subscriber} from 'tabris';
let observable: Observable<string>;
let observer: Subscriber<string>;
let partialObserver: PartialObserver<string> = {};
let subHandler: SubscriptionHandler<string>;
let subscription: Subscription;
let teardownObject: TeardownLogic = () => undefined;
let str: string = '';
|
teardownObject = {unsubscribe: () => undefined}
observable = new Observable(() => {});
observable = new Observable<string>(() => {});
observable = new Observable(observerArg => {
observer = observerArg;
bool = observer.closed;
});
observable = new Observable(observerArg => {
observerArg.next('foo');
observerArg.error(new Error('foo'));
observerArg.complete();
});
observable = new Observable(() => teardownObject);
subscription = observable.subscribe(partialObserver);
subscription = observable.subscribe(arg => str = arg, (arg: Error) => err = arg, () => undefined);
subscription = observable.subscribe(null, null, () => undefined);
subscription = observable.subscribe(arg => str = arg, (arg: Error) => err = arg);
subscription = observable.subscribe(arg => str = arg);
let bool: boolean = subscription.closed;
subscription.unsubscribe();
observable[Symbol.observable]().subscribe(partialObserver);
subscription = Observable.mutations(new Date()).subscribe(value => str = value.toLocaleDateString());
|
let err: Error;
|
random_line_split
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Layman is a complete library for the operation and maintainance
on all gentoo repositories and overlays
"""
import sys
try:
from layman.api import LaymanAPI
from layman.config import BareConfig
from layman.output import Message
except ImportError:
sys.stderr.write("!!! Layman API imports failed.")
raise
class
|
(LaymanAPI):
"""A complete high level interface capable of performing all
overlay repository actions."""
def __init__(self, stdout=sys.stdout, stdin=sys.stdin, stderr=sys.stderr,
config=None, read_configfile=True, quiet=False, quietness=4,
verbose=False, nocolor=False, width=0, root=None
):
"""Input parameters are optional to override the defaults.
sets up our LaymanAPI with defaults or passed in values
and returns an instance of it"""
self.message = Message(out=stdout, err=stderr)
self.config = BareConfig(
output=self.message,
stdout=stdout,
stdin=stdin,
stderr=stderr,
config=config,
read_configfile=read_configfile,
quiet=quiet,
quietness=quietness,
verbose=verbose,
nocolor=nocolor,
width=width,
root=root
)
LaymanAPI.__init__(self, self.config,
report_errors=True,
output=self.config['output']
)
return
|
Layman
|
identifier_name
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Layman is a complete library for the operation and maintainance
on all gentoo repositories and overlays
"""
import sys
try:
from layman.api import LaymanAPI
from layman.config import BareConfig
from layman.output import Message
except ImportError:
sys.stderr.write("!!! Layman API imports failed.")
raise
class Layman(LaymanAPI):
"""A complete high level interface capable of performing all
overlay repository actions."""
def __init__(self, stdout=sys.stdout, stdin=sys.stdin, stderr=sys.stderr,
config=None, read_configfile=True, quiet=False, quietness=4,
verbose=False, nocolor=False, width=0, root=None
):
"""Input parameters are optional to override the defaults.
sets up our LaymanAPI with defaults or passed in values
and returns an instance of it"""
self.message = Message(out=stdout, err=stderr)
self.config = BareConfig(
output=self.message,
stdout=stdout,
stdin=stdin,
stderr=stderr,
config=config,
read_configfile=read_configfile,
quiet=quiet,
|
nocolor=nocolor,
width=width,
root=root
)
LaymanAPI.__init__(self, self.config,
report_errors=True,
output=self.config['output']
)
return
|
quietness=quietness,
verbose=verbose,
|
random_line_split
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Layman is a complete library for the operation and maintainance
on all gentoo repositories and overlays
"""
import sys
try:
from layman.api import LaymanAPI
from layman.config import BareConfig
from layman.output import Message
except ImportError:
sys.stderr.write("!!! Layman API imports failed.")
raise
class Layman(LaymanAPI):
|
"""A complete high level interface capable of performing all
overlay repository actions."""
def __init__(self, stdout=sys.stdout, stdin=sys.stdin, stderr=sys.stderr,
config=None, read_configfile=True, quiet=False, quietness=4,
verbose=False, nocolor=False, width=0, root=None
):
"""Input parameters are optional to override the defaults.
sets up our LaymanAPI with defaults or passed in values
and returns an instance of it"""
self.message = Message(out=stdout, err=stderr)
self.config = BareConfig(
output=self.message,
stdout=stdout,
stdin=stdin,
stderr=stderr,
config=config,
read_configfile=read_configfile,
quiet=quiet,
quietness=quietness,
verbose=verbose,
nocolor=nocolor,
width=width,
root=root
)
LaymanAPI.__init__(self, self.config,
report_errors=True,
output=self.config['output']
)
return
|
identifier_body
|
|
settings.ts
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
"use strict";
import vscode = require("vscode");
import utils = require("./utils");
enum CodeFormattingPreset {
Custom,
Allman,
OTBS,
Stroustrup,
}
enum PipelineIndentationStyle {
IncreaseIndentationForFirstPipeline,
IncreaseIndentationAfterEveryPipeline,
NoIndentation,
None,
}
export enum CommentType {
Disabled = "Disabled",
BlockComment = "BlockComment",
LineComment = "LineComment",
}
export interface IPowerShellAdditionalExePathSettings {
versionName: string;
exePath: string;
}
export interface IBugReportingSettings {
project: string;
}
export interface ICodeFoldingSettings {
enable?: boolean;
showLastLine?: boolean;
}
export interface ICodeFormattingSettings {
autoCorrectAliases: boolean;
preset: CodeFormattingPreset;
openBraceOnSameLine: boolean;
newLineAfterOpenBrace: boolean;
newLineAfterCloseBrace: boolean;
pipelineIndentationStyle: PipelineIndentationStyle;
whitespaceBeforeOpenBrace: boolean;
whitespaceBeforeOpenParen: boolean;
whitespaceAroundOperator: boolean;
whitespaceAfterSeparator: boolean;
whitespaceBetweenParameters: boolean;
whitespaceInsideBrace: boolean;
addWhitespaceAroundPipe: boolean;
trimWhitespaceAroundPipe: boolean;
ignoreOneLineBlock: boolean;
alignPropertyValuePairs: boolean;
useConstantStrings: boolean;
useCorrectCasing: boolean;
}
export interface IScriptAnalysisSettings {
enable?: boolean;
settingsPath: string;
}
export interface IDebuggingSettings {
createTemporaryIntegratedConsole?: boolean;
}
export interface IDeveloperSettings {
featureFlags?: string[];
bundledModulesPath?: string;
editorServicesLogLevel?: string;
editorServicesWaitForDebugger?: boolean;
waitForSessionFileTimeoutSeconds?: number;
}
export interface ISettings {
powerShellAdditionalExePaths?: IPowerShellAdditionalExePathSettings[];
powerShellDefaultVersion?: string;
// This setting is no longer used but is here to assist in cleaning up the users settings.
powerShellExePath?: string;
promptToUpdatePowerShell?: boolean;
promptToUpdatePackageManagement?: boolean;
bundledModulesPath?: string;
startAsLoginShell?: IStartAsLoginShellSettings;
startAutomatically?: boolean;
useX86Host?: boolean;
enableProfileLoading?: boolean;
helpCompletion: string;
scriptAnalysis?: IScriptAnalysisSettings;
debugging?: IDebuggingSettings;
developer?: IDeveloperSettings;
codeFolding?: ICodeFoldingSettings;
codeFormatting?: ICodeFormattingSettings;
integratedConsole?: IIntegratedConsoleSettings;
bugReporting?: IBugReportingSettings;
sideBar?: ISideBarSettings;
pester?: IPesterSettings;
buttons?: IButtonSettings;
cwd?: string;
notebooks?: INotebooksSettings;
}
export interface IStartAsLoginShellSettings {
osx?: boolean;
linux?: boolean;
}
export interface IIntegratedConsoleSettings {
showOnStartup?: boolean;
focusConsoleOnExecute?: boolean;
useLegacyReadLine?: boolean;
forceClearScrollbackBuffer?: boolean;
suppressStartupBanner?: boolean;
}
export interface ISideBarSettings {
CommandExplorerVisibility?: boolean;
}
export interface IPesterSettings {
useLegacyCodeLens?: boolean;
outputVerbosity?: string;
debugOutputVerbosity?: string;
}
export interface IButtonSettings {
showRunButtons?: boolean;
showPanelMovementButtons?: boolean;
}
export interface INotebooksSettings {
saveMarkdownCellsAs?: CommentType;
}
export function
|
(): ISettings {
const configuration: vscode.WorkspaceConfiguration =
vscode.workspace.getConfiguration(
utils.PowerShellLanguageId);
const defaultBugReportingSettings: IBugReportingSettings = {
project: "https://github.com/PowerShell/vscode-powershell",
};
const defaultScriptAnalysisSettings: IScriptAnalysisSettings = {
enable: true,
settingsPath: "PSScriptAnalyzerSettings.psd1",
};
const defaultDebuggingSettings: IDebuggingSettings = {
createTemporaryIntegratedConsole: false,
};
const defaultDeveloperSettings: IDeveloperSettings = {
featureFlags: [],
// From `<root>/out/main.js` we go to the directory before <root> and
// then into the other repo.
bundledModulesPath: "../../PowerShellEditorServices/module",
editorServicesLogLevel: "Normal",
editorServicesWaitForDebugger: false,
waitForSessionFileTimeoutSeconds: 240,
};
const defaultCodeFoldingSettings: ICodeFoldingSettings = {
enable: true,
showLastLine: false,
};
const defaultCodeFormattingSettings: ICodeFormattingSettings = {
autoCorrectAliases: false,
preset: CodeFormattingPreset.Custom,
openBraceOnSameLine: true,
newLineAfterOpenBrace: true,
newLineAfterCloseBrace: true,
pipelineIndentationStyle: PipelineIndentationStyle.NoIndentation,
whitespaceBeforeOpenBrace: true,
whitespaceBeforeOpenParen: true,
whitespaceAroundOperator: true,
whitespaceAfterSeparator: true,
whitespaceBetweenParameters: false,
whitespaceInsideBrace: true,
addWhitespaceAroundPipe: true,
trimWhitespaceAroundPipe: false,
ignoreOneLineBlock: true,
alignPropertyValuePairs: true,
useConstantStrings: false,
useCorrectCasing: false,
};
const defaultStartAsLoginShellSettings: IStartAsLoginShellSettings = {
osx: true,
linux: false,
};
const defaultIntegratedConsoleSettings: IIntegratedConsoleSettings = {
showOnStartup: true,
focusConsoleOnExecute: true,
useLegacyReadLine: false,
forceClearScrollbackBuffer: false,
};
const defaultSideBarSettings: ISideBarSettings = {
CommandExplorerVisibility: true,
};
const defaultButtonSettings: IButtonSettings = {
showRunButtons: true,
showPanelMovementButtons: false
};
const defaultPesterSettings: IPesterSettings = {
useLegacyCodeLens: true,
outputVerbosity: "FromPreference",
debugOutputVerbosity: "Diagnostic",
};
const defaultNotebooksSettings: INotebooksSettings = {
saveMarkdownCellsAs: CommentType.BlockComment,
};
return {
startAutomatically:
configuration.get<boolean>("startAutomatically", true),
powerShellAdditionalExePaths:
configuration.get<IPowerShellAdditionalExePathSettings[]>("powerShellAdditionalExePaths", undefined),
powerShellDefaultVersion:
configuration.get<string>("powerShellDefaultVersion", undefined),
powerShellExePath:
configuration.get<string>("powerShellExePath", undefined),
promptToUpdatePowerShell:
configuration.get<boolean>("promptToUpdatePowerShell", true),
promptToUpdatePackageManagement:
configuration.get<boolean>("promptToUpdatePackageManagement", true),
bundledModulesPath:
"../modules", // Because the extension is always at `<root>/out/main.js`
useX86Host:
configuration.get<boolean>("useX86Host", false),
enableProfileLoading:
configuration.get<boolean>("enableProfileLoading", false),
helpCompletion:
configuration.get<string>("helpCompletion", CommentType.BlockComment),
scriptAnalysis:
configuration.get<IScriptAnalysisSettings>("scriptAnalysis", defaultScriptAnalysisSettings),
debugging:
configuration.get<IDebuggingSettings>("debugging", defaultDebuggingSettings),
developer:
getWorkspaceSettingsWithDefaults<IDeveloperSettings>(configuration, "developer", defaultDeveloperSettings),
codeFolding:
configuration.get<ICodeFoldingSettings>("codeFolding", defaultCodeFoldingSettings),
codeFormatting:
configuration.get<ICodeFormattingSettings>("codeFormatting", defaultCodeFormattingSettings),
integratedConsole:
configuration.get<IIntegratedConsoleSettings>("integratedConsole", defaultIntegratedConsoleSettings),
bugReporting:
configuration.get<IBugReportingSettings>("bugReporting", defaultBugReportingSettings),
sideBar:
configuration.get<ISideBarSettings>("sideBar", defaultSideBarSettings),
pester:
configuration.get<IPesterSettings>("pester", defaultPesterSettings),
buttons:
configuration.get<IButtonSettings>("buttons", defaultButtonSettings),
notebooks:
configuration.get<INotebooksSettings>("notebooks", defaultNotebooksSettings),
startAsLoginShell:
// tslint:disable-next-line
// We follow the same convention as VS Code - https://github.com/microsoft/vscode/blob/ff00badd955d6cfcb8eab5f25f3edc86b762f49f/src/vs/workbench/contrib/terminal/browser/terminal.contribution.ts#L105-L107
// "Unlike on Linux, ~/.profile is not sourced when logging into a macOS session. This
// is the reason terminals on macOS typically run login shells by default which set up
// the environment. See http://unix.stackexchange.com/a/119675/115410"
configuration.get<IStartAsLoginShellSettings>("startAsLoginShell", defaultStartAsLoginShellSettings),
cwd:
configuration.get<string>("cwd", null),
};
}
// Get the ConfigurationTarget (read: scope) of where the *effective* setting value comes from
export async function getEffectiveConfigurationTarget(settingName: string): Promise<vscode.ConfigurationTarget> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
const detail = configuration.inspect(settingName);
let configurationTarget = null;
if (typeof detail.workspaceFolderValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.WorkspaceFolder;
}
else if (typeof detail.workspaceValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Workspace;
}
else if (typeof detail.globalValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Global;
}
return configurationTarget;
}
export async function change(
settingName: string,
newValue: any,
configurationTarget?: vscode.ConfigurationTarget | boolean): Promise<void> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
await configuration.update(settingName, newValue, configurationTarget);
}
function getWorkspaceSettingsWithDefaults<TSettings>(
workspaceConfiguration: vscode.WorkspaceConfiguration,
settingName: string,
defaultSettings: TSettings): TSettings {
const importedSettings: TSettings = workspaceConfiguration.get<TSettings>(settingName, defaultSettings);
for (const setting in importedSettings) {
if (importedSettings[setting]) {
defaultSettings[setting] = importedSettings[setting];
}
}
return defaultSettings;
}
|
load
|
identifier_name
|
settings.ts
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
"use strict";
import vscode = require("vscode");
import utils = require("./utils");
enum CodeFormattingPreset {
Custom,
Allman,
OTBS,
Stroustrup,
}
enum PipelineIndentationStyle {
IncreaseIndentationForFirstPipeline,
IncreaseIndentationAfterEveryPipeline,
NoIndentation,
None,
}
export enum CommentType {
Disabled = "Disabled",
BlockComment = "BlockComment",
LineComment = "LineComment",
}
export interface IPowerShellAdditionalExePathSettings {
versionName: string;
exePath: string;
}
export interface IBugReportingSettings {
project: string;
}
export interface ICodeFoldingSettings {
enable?: boolean;
showLastLine?: boolean;
}
export interface ICodeFormattingSettings {
autoCorrectAliases: boolean;
preset: CodeFormattingPreset;
openBraceOnSameLine: boolean;
newLineAfterOpenBrace: boolean;
newLineAfterCloseBrace: boolean;
pipelineIndentationStyle: PipelineIndentationStyle;
whitespaceBeforeOpenBrace: boolean;
whitespaceBeforeOpenParen: boolean;
whitespaceAroundOperator: boolean;
whitespaceAfterSeparator: boolean;
whitespaceBetweenParameters: boolean;
whitespaceInsideBrace: boolean;
addWhitespaceAroundPipe: boolean;
trimWhitespaceAroundPipe: boolean;
ignoreOneLineBlock: boolean;
alignPropertyValuePairs: boolean;
useConstantStrings: boolean;
useCorrectCasing: boolean;
}
export interface IScriptAnalysisSettings {
enable?: boolean;
settingsPath: string;
}
export interface IDebuggingSettings {
createTemporaryIntegratedConsole?: boolean;
}
export interface IDeveloperSettings {
featureFlags?: string[];
bundledModulesPath?: string;
editorServicesLogLevel?: string;
editorServicesWaitForDebugger?: boolean;
waitForSessionFileTimeoutSeconds?: number;
}
export interface ISettings {
powerShellAdditionalExePaths?: IPowerShellAdditionalExePathSettings[];
powerShellDefaultVersion?: string;
// This setting is no longer used but is here to assist in cleaning up the users settings.
powerShellExePath?: string;
promptToUpdatePowerShell?: boolean;
promptToUpdatePackageManagement?: boolean;
bundledModulesPath?: string;
startAsLoginShell?: IStartAsLoginShellSettings;
startAutomatically?: boolean;
useX86Host?: boolean;
enableProfileLoading?: boolean;
helpCompletion: string;
scriptAnalysis?: IScriptAnalysisSettings;
debugging?: IDebuggingSettings;
developer?: IDeveloperSettings;
codeFolding?: ICodeFoldingSettings;
codeFormatting?: ICodeFormattingSettings;
integratedConsole?: IIntegratedConsoleSettings;
bugReporting?: IBugReportingSettings;
sideBar?: ISideBarSettings;
pester?: IPesterSettings;
buttons?: IButtonSettings;
cwd?: string;
notebooks?: INotebooksSettings;
}
export interface IStartAsLoginShellSettings {
osx?: boolean;
linux?: boolean;
}
export interface IIntegratedConsoleSettings {
showOnStartup?: boolean;
focusConsoleOnExecute?: boolean;
useLegacyReadLine?: boolean;
forceClearScrollbackBuffer?: boolean;
suppressStartupBanner?: boolean;
}
export interface ISideBarSettings {
CommandExplorerVisibility?: boolean;
}
export interface IPesterSettings {
useLegacyCodeLens?: boolean;
outputVerbosity?: string;
debugOutputVerbosity?: string;
}
export interface IButtonSettings {
showRunButtons?: boolean;
showPanelMovementButtons?: boolean;
}
export interface INotebooksSettings {
saveMarkdownCellsAs?: CommentType;
}
export function load(): ISettings {
const configuration: vscode.WorkspaceConfiguration =
vscode.workspace.getConfiguration(
utils.PowerShellLanguageId);
const defaultBugReportingSettings: IBugReportingSettings = {
project: "https://github.com/PowerShell/vscode-powershell",
};
const defaultScriptAnalysisSettings: IScriptAnalysisSettings = {
enable: true,
settingsPath: "PSScriptAnalyzerSettings.psd1",
};
const defaultDebuggingSettings: IDebuggingSettings = {
createTemporaryIntegratedConsole: false,
};
const defaultDeveloperSettings: IDeveloperSettings = {
featureFlags: [],
// From `<root>/out/main.js` we go to the directory before <root> and
// then into the other repo.
bundledModulesPath: "../../PowerShellEditorServices/module",
editorServicesLogLevel: "Normal",
editorServicesWaitForDebugger: false,
waitForSessionFileTimeoutSeconds: 240,
};
const defaultCodeFoldingSettings: ICodeFoldingSettings = {
enable: true,
showLastLine: false,
};
const defaultCodeFormattingSettings: ICodeFormattingSettings = {
autoCorrectAliases: false,
preset: CodeFormattingPreset.Custom,
openBraceOnSameLine: true,
newLineAfterOpenBrace: true,
newLineAfterCloseBrace: true,
pipelineIndentationStyle: PipelineIndentationStyle.NoIndentation,
whitespaceBeforeOpenBrace: true,
whitespaceBeforeOpenParen: true,
whitespaceAroundOperator: true,
whitespaceAfterSeparator: true,
whitespaceBetweenParameters: false,
whitespaceInsideBrace: true,
addWhitespaceAroundPipe: true,
trimWhitespaceAroundPipe: false,
ignoreOneLineBlock: true,
alignPropertyValuePairs: true,
useConstantStrings: false,
useCorrectCasing: false,
};
const defaultStartAsLoginShellSettings: IStartAsLoginShellSettings = {
osx: true,
linux: false,
};
const defaultIntegratedConsoleSettings: IIntegratedConsoleSettings = {
showOnStartup: true,
focusConsoleOnExecute: true,
useLegacyReadLine: false,
forceClearScrollbackBuffer: false,
};
const defaultSideBarSettings: ISideBarSettings = {
CommandExplorerVisibility: true,
};
const defaultButtonSettings: IButtonSettings = {
showRunButtons: true,
showPanelMovementButtons: false
};
const defaultPesterSettings: IPesterSettings = {
useLegacyCodeLens: true,
outputVerbosity: "FromPreference",
debugOutputVerbosity: "Diagnostic",
};
const defaultNotebooksSettings: INotebooksSettings = {
saveMarkdownCellsAs: CommentType.BlockComment,
};
return {
startAutomatically:
configuration.get<boolean>("startAutomatically", true),
powerShellAdditionalExePaths:
configuration.get<IPowerShellAdditionalExePathSettings[]>("powerShellAdditionalExePaths", undefined),
powerShellDefaultVersion:
configuration.get<string>("powerShellDefaultVersion", undefined),
powerShellExePath:
configuration.get<string>("powerShellExePath", undefined),
promptToUpdatePowerShell:
configuration.get<boolean>("promptToUpdatePowerShell", true),
promptToUpdatePackageManagement:
configuration.get<boolean>("promptToUpdatePackageManagement", true),
bundledModulesPath:
"../modules", // Because the extension is always at `<root>/out/main.js`
useX86Host:
configuration.get<boolean>("useX86Host", false),
enableProfileLoading:
configuration.get<boolean>("enableProfileLoading", false),
helpCompletion:
configuration.get<string>("helpCompletion", CommentType.BlockComment),
scriptAnalysis:
configuration.get<IScriptAnalysisSettings>("scriptAnalysis", defaultScriptAnalysisSettings),
debugging:
configuration.get<IDebuggingSettings>("debugging", defaultDebuggingSettings),
developer:
getWorkspaceSettingsWithDefaults<IDeveloperSettings>(configuration, "developer", defaultDeveloperSettings),
codeFolding:
configuration.get<ICodeFoldingSettings>("codeFolding", defaultCodeFoldingSettings),
codeFormatting:
configuration.get<ICodeFormattingSettings>("codeFormatting", defaultCodeFormattingSettings),
integratedConsole:
configuration.get<IIntegratedConsoleSettings>("integratedConsole", defaultIntegratedConsoleSettings),
bugReporting:
configuration.get<IBugReportingSettings>("bugReporting", defaultBugReportingSettings),
sideBar:
configuration.get<ISideBarSettings>("sideBar", defaultSideBarSettings),
pester:
configuration.get<IPesterSettings>("pester", defaultPesterSettings),
buttons:
configuration.get<IButtonSettings>("buttons", defaultButtonSettings),
notebooks:
configuration.get<INotebooksSettings>("notebooks", defaultNotebooksSettings),
startAsLoginShell:
// tslint:disable-next-line
// We follow the same convention as VS Code - https://github.com/microsoft/vscode/blob/ff00badd955d6cfcb8eab5f25f3edc86b762f49f/src/vs/workbench/contrib/terminal/browser/terminal.contribution.ts#L105-L107
// "Unlike on Linux, ~/.profile is not sourced when logging into a macOS session. This
// is the reason terminals on macOS typically run login shells by default which set up
// the environment. See http://unix.stackexchange.com/a/119675/115410"
configuration.get<IStartAsLoginShellSettings>("startAsLoginShell", defaultStartAsLoginShellSettings),
cwd:
configuration.get<string>("cwd", null),
};
}
// Get the ConfigurationTarget (read: scope) of where the *effective* setting value comes from
export async function getEffectiveConfigurationTarget(settingName: string): Promise<vscode.ConfigurationTarget> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
const detail = configuration.inspect(settingName);
let configurationTarget = null;
if (typeof detail.workspaceFolderValue !== "undefined")
|
else if (typeof detail.workspaceValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Workspace;
}
else if (typeof detail.globalValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Global;
}
return configurationTarget;
}
export async function change(
settingName: string,
newValue: any,
configurationTarget?: vscode.ConfigurationTarget | boolean): Promise<void> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
await configuration.update(settingName, newValue, configurationTarget);
}
function getWorkspaceSettingsWithDefaults<TSettings>(
workspaceConfiguration: vscode.WorkspaceConfiguration,
settingName: string,
defaultSettings: TSettings): TSettings {
const importedSettings: TSettings = workspaceConfiguration.get<TSettings>(settingName, defaultSettings);
for (const setting in importedSettings) {
if (importedSettings[setting]) {
defaultSettings[setting] = importedSettings[setting];
}
}
return defaultSettings;
}
|
{
configurationTarget = vscode.ConfigurationTarget.WorkspaceFolder;
}
|
conditional_block
|
settings.ts
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
"use strict";
import vscode = require("vscode");
import utils = require("./utils");
enum CodeFormattingPreset {
Custom,
Allman,
OTBS,
Stroustrup,
}
enum PipelineIndentationStyle {
IncreaseIndentationForFirstPipeline,
IncreaseIndentationAfterEveryPipeline,
NoIndentation,
None,
}
export enum CommentType {
Disabled = "Disabled",
BlockComment = "BlockComment",
LineComment = "LineComment",
}
export interface IPowerShellAdditionalExePathSettings {
versionName: string;
exePath: string;
}
export interface IBugReportingSettings {
project: string;
}
export interface ICodeFoldingSettings {
enable?: boolean;
showLastLine?: boolean;
}
export interface ICodeFormattingSettings {
autoCorrectAliases: boolean;
preset: CodeFormattingPreset;
openBraceOnSameLine: boolean;
newLineAfterOpenBrace: boolean;
newLineAfterCloseBrace: boolean;
pipelineIndentationStyle: PipelineIndentationStyle;
whitespaceBeforeOpenBrace: boolean;
whitespaceBeforeOpenParen: boolean;
whitespaceAroundOperator: boolean;
whitespaceAfterSeparator: boolean;
whitespaceBetweenParameters: boolean;
whitespaceInsideBrace: boolean;
addWhitespaceAroundPipe: boolean;
trimWhitespaceAroundPipe: boolean;
ignoreOneLineBlock: boolean;
alignPropertyValuePairs: boolean;
useConstantStrings: boolean;
useCorrectCasing: boolean;
}
export interface IScriptAnalysisSettings {
enable?: boolean;
settingsPath: string;
}
export interface IDebuggingSettings {
createTemporaryIntegratedConsole?: boolean;
}
export interface IDeveloperSettings {
featureFlags?: string[];
bundledModulesPath?: string;
editorServicesLogLevel?: string;
editorServicesWaitForDebugger?: boolean;
waitForSessionFileTimeoutSeconds?: number;
}
export interface ISettings {
powerShellAdditionalExePaths?: IPowerShellAdditionalExePathSettings[];
powerShellDefaultVersion?: string;
// This setting is no longer used but is here to assist in cleaning up the users settings.
powerShellExePath?: string;
promptToUpdatePowerShell?: boolean;
promptToUpdatePackageManagement?: boolean;
bundledModulesPath?: string;
startAsLoginShell?: IStartAsLoginShellSettings;
startAutomatically?: boolean;
useX86Host?: boolean;
enableProfileLoading?: boolean;
helpCompletion: string;
scriptAnalysis?: IScriptAnalysisSettings;
debugging?: IDebuggingSettings;
developer?: IDeveloperSettings;
codeFolding?: ICodeFoldingSettings;
codeFormatting?: ICodeFormattingSettings;
integratedConsole?: IIntegratedConsoleSettings;
bugReporting?: IBugReportingSettings;
sideBar?: ISideBarSettings;
pester?: IPesterSettings;
buttons?: IButtonSettings;
cwd?: string;
notebooks?: INotebooksSettings;
}
export interface IStartAsLoginShellSettings {
osx?: boolean;
linux?: boolean;
}
export interface IIntegratedConsoleSettings {
showOnStartup?: boolean;
focusConsoleOnExecute?: boolean;
useLegacyReadLine?: boolean;
forceClearScrollbackBuffer?: boolean;
suppressStartupBanner?: boolean;
}
export interface ISideBarSettings {
CommandExplorerVisibility?: boolean;
}
export interface IPesterSettings {
useLegacyCodeLens?: boolean;
outputVerbosity?: string;
debugOutputVerbosity?: string;
}
export interface IButtonSettings {
showRunButtons?: boolean;
showPanelMovementButtons?: boolean;
}
export interface INotebooksSettings {
saveMarkdownCellsAs?: CommentType;
}
export function load(): ISettings {
const configuration: vscode.WorkspaceConfiguration =
vscode.workspace.getConfiguration(
utils.PowerShellLanguageId);
const defaultBugReportingSettings: IBugReportingSettings = {
project: "https://github.com/PowerShell/vscode-powershell",
};
const defaultScriptAnalysisSettings: IScriptAnalysisSettings = {
enable: true,
settingsPath: "PSScriptAnalyzerSettings.psd1",
};
const defaultDebuggingSettings: IDebuggingSettings = {
createTemporaryIntegratedConsole: false,
};
const defaultDeveloperSettings: IDeveloperSettings = {
featureFlags: [],
// From `<root>/out/main.js` we go to the directory before <root> and
// then into the other repo.
bundledModulesPath: "../../PowerShellEditorServices/module",
editorServicesLogLevel: "Normal",
editorServicesWaitForDebugger: false,
waitForSessionFileTimeoutSeconds: 240,
};
const defaultCodeFoldingSettings: ICodeFoldingSettings = {
enable: true,
showLastLine: false,
};
const defaultCodeFormattingSettings: ICodeFormattingSettings = {
autoCorrectAliases: false,
preset: CodeFormattingPreset.Custom,
openBraceOnSameLine: true,
newLineAfterOpenBrace: true,
newLineAfterCloseBrace: true,
pipelineIndentationStyle: PipelineIndentationStyle.NoIndentation,
whitespaceBeforeOpenBrace: true,
whitespaceBeforeOpenParen: true,
whitespaceAroundOperator: true,
whitespaceAfterSeparator: true,
whitespaceBetweenParameters: false,
whitespaceInsideBrace: true,
addWhitespaceAroundPipe: true,
trimWhitespaceAroundPipe: false,
ignoreOneLineBlock: true,
alignPropertyValuePairs: true,
useConstantStrings: false,
useCorrectCasing: false,
};
const defaultStartAsLoginShellSettings: IStartAsLoginShellSettings = {
osx: true,
linux: false,
};
const defaultIntegratedConsoleSettings: IIntegratedConsoleSettings = {
showOnStartup: true,
focusConsoleOnExecute: true,
useLegacyReadLine: false,
forceClearScrollbackBuffer: false,
};
const defaultSideBarSettings: ISideBarSettings = {
CommandExplorerVisibility: true,
};
const defaultButtonSettings: IButtonSettings = {
showRunButtons: true,
showPanelMovementButtons: false
};
const defaultPesterSettings: IPesterSettings = {
useLegacyCodeLens: true,
outputVerbosity: "FromPreference",
debugOutputVerbosity: "Diagnostic",
};
const defaultNotebooksSettings: INotebooksSettings = {
saveMarkdownCellsAs: CommentType.BlockComment,
};
return {
startAutomatically:
configuration.get<boolean>("startAutomatically", true),
powerShellAdditionalExePaths:
configuration.get<IPowerShellAdditionalExePathSettings[]>("powerShellAdditionalExePaths", undefined),
powerShellDefaultVersion:
configuration.get<string>("powerShellDefaultVersion", undefined),
powerShellExePath:
configuration.get<string>("powerShellExePath", undefined),
promptToUpdatePowerShell:
configuration.get<boolean>("promptToUpdatePowerShell", true),
promptToUpdatePackageManagement:
configuration.get<boolean>("promptToUpdatePackageManagement", true),
bundledModulesPath:
"../modules", // Because the extension is always at `<root>/out/main.js`
useX86Host:
configuration.get<boolean>("useX86Host", false),
enableProfileLoading:
configuration.get<boolean>("enableProfileLoading", false),
helpCompletion:
configuration.get<string>("helpCompletion", CommentType.BlockComment),
scriptAnalysis:
configuration.get<IScriptAnalysisSettings>("scriptAnalysis", defaultScriptAnalysisSettings),
debugging:
configuration.get<IDebuggingSettings>("debugging", defaultDebuggingSettings),
developer:
getWorkspaceSettingsWithDefaults<IDeveloperSettings>(configuration, "developer", defaultDeveloperSettings),
codeFolding:
configuration.get<ICodeFoldingSettings>("codeFolding", defaultCodeFoldingSettings),
codeFormatting:
configuration.get<ICodeFormattingSettings>("codeFormatting", defaultCodeFormattingSettings),
integratedConsole:
configuration.get<IIntegratedConsoleSettings>("integratedConsole", defaultIntegratedConsoleSettings),
bugReporting:
configuration.get<IBugReportingSettings>("bugReporting", defaultBugReportingSettings),
sideBar:
configuration.get<ISideBarSettings>("sideBar", defaultSideBarSettings),
pester:
configuration.get<IPesterSettings>("pester", defaultPesterSettings),
buttons:
configuration.get<IButtonSettings>("buttons", defaultButtonSettings),
notebooks:
configuration.get<INotebooksSettings>("notebooks", defaultNotebooksSettings),
startAsLoginShell:
// tslint:disable-next-line
// We follow the same convention as VS Code - https://github.com/microsoft/vscode/blob/ff00badd955d6cfcb8eab5f25f3edc86b762f49f/src/vs/workbench/contrib/terminal/browser/terminal.contribution.ts#L105-L107
// "Unlike on Linux, ~/.profile is not sourced when logging into a macOS session. This
// is the reason terminals on macOS typically run login shells by default which set up
// the environment. See http://unix.stackexchange.com/a/119675/115410"
configuration.get<IStartAsLoginShellSettings>("startAsLoginShell", defaultStartAsLoginShellSettings),
cwd:
configuration.get<string>("cwd", null),
};
}
// Get the ConfigurationTarget (read: scope) of where the *effective* setting value comes from
export async function getEffectiveConfigurationTarget(settingName: string): Promise<vscode.ConfigurationTarget>
|
export async function change(
settingName: string,
newValue: any,
configurationTarget?: vscode.ConfigurationTarget | boolean): Promise<void> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
await configuration.update(settingName, newValue, configurationTarget);
}
function getWorkspaceSettingsWithDefaults<TSettings>(
workspaceConfiguration: vscode.WorkspaceConfiguration,
settingName: string,
defaultSettings: TSettings): TSettings {
const importedSettings: TSettings = workspaceConfiguration.get<TSettings>(settingName, defaultSettings);
for (const setting in importedSettings) {
if (importedSettings[setting]) {
defaultSettings[setting] = importedSettings[setting];
}
}
return defaultSettings;
}
|
{
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
const detail = configuration.inspect(settingName);
let configurationTarget = null;
if (typeof detail.workspaceFolderValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.WorkspaceFolder;
}
else if (typeof detail.workspaceValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Workspace;
}
else if (typeof detail.globalValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Global;
}
return configurationTarget;
}
|
identifier_body
|
settings.ts
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
"use strict";
import vscode = require("vscode");
import utils = require("./utils");
enum CodeFormattingPreset {
Custom,
Allman,
OTBS,
Stroustrup,
}
enum PipelineIndentationStyle {
IncreaseIndentationForFirstPipeline,
IncreaseIndentationAfterEveryPipeline,
NoIndentation,
None,
}
export enum CommentType {
Disabled = "Disabled",
BlockComment = "BlockComment",
LineComment = "LineComment",
}
export interface IPowerShellAdditionalExePathSettings {
versionName: string;
exePath: string;
}
export interface IBugReportingSettings {
project: string;
}
export interface ICodeFoldingSettings {
enable?: boolean;
showLastLine?: boolean;
}
export interface ICodeFormattingSettings {
autoCorrectAliases: boolean;
preset: CodeFormattingPreset;
openBraceOnSameLine: boolean;
newLineAfterOpenBrace: boolean;
newLineAfterCloseBrace: boolean;
pipelineIndentationStyle: PipelineIndentationStyle;
whitespaceBeforeOpenBrace: boolean;
whitespaceBeforeOpenParen: boolean;
whitespaceAroundOperator: boolean;
whitespaceAfterSeparator: boolean;
whitespaceBetweenParameters: boolean;
whitespaceInsideBrace: boolean;
addWhitespaceAroundPipe: boolean;
trimWhitespaceAroundPipe: boolean;
ignoreOneLineBlock: boolean;
alignPropertyValuePairs: boolean;
useConstantStrings: boolean;
useCorrectCasing: boolean;
}
export interface IScriptAnalysisSettings {
enable?: boolean;
settingsPath: string;
}
export interface IDebuggingSettings {
createTemporaryIntegratedConsole?: boolean;
}
export interface IDeveloperSettings {
featureFlags?: string[];
bundledModulesPath?: string;
editorServicesLogLevel?: string;
editorServicesWaitForDebugger?: boolean;
waitForSessionFileTimeoutSeconds?: number;
}
export interface ISettings {
powerShellAdditionalExePaths?: IPowerShellAdditionalExePathSettings[];
powerShellDefaultVersion?: string;
// This setting is no longer used but is here to assist in cleaning up the users settings.
powerShellExePath?: string;
promptToUpdatePowerShell?: boolean;
promptToUpdatePackageManagement?: boolean;
bundledModulesPath?: string;
startAsLoginShell?: IStartAsLoginShellSettings;
startAutomatically?: boolean;
useX86Host?: boolean;
enableProfileLoading?: boolean;
helpCompletion: string;
scriptAnalysis?: IScriptAnalysisSettings;
debugging?: IDebuggingSettings;
developer?: IDeveloperSettings;
codeFolding?: ICodeFoldingSettings;
codeFormatting?: ICodeFormattingSettings;
integratedConsole?: IIntegratedConsoleSettings;
bugReporting?: IBugReportingSettings;
sideBar?: ISideBarSettings;
pester?: IPesterSettings;
buttons?: IButtonSettings;
cwd?: string;
notebooks?: INotebooksSettings;
}
export interface IStartAsLoginShellSettings {
osx?: boolean;
linux?: boolean;
}
export interface IIntegratedConsoleSettings {
showOnStartup?: boolean;
focusConsoleOnExecute?: boolean;
useLegacyReadLine?: boolean;
forceClearScrollbackBuffer?: boolean;
suppressStartupBanner?: boolean;
}
export interface ISideBarSettings {
CommandExplorerVisibility?: boolean;
}
export interface IPesterSettings {
useLegacyCodeLens?: boolean;
outputVerbosity?: string;
debugOutputVerbosity?: string;
}
export interface IButtonSettings {
showRunButtons?: boolean;
showPanelMovementButtons?: boolean;
}
export interface INotebooksSettings {
saveMarkdownCellsAs?: CommentType;
}
export function load(): ISettings {
const configuration: vscode.WorkspaceConfiguration =
vscode.workspace.getConfiguration(
utils.PowerShellLanguageId);
const defaultBugReportingSettings: IBugReportingSettings = {
project: "https://github.com/PowerShell/vscode-powershell",
};
const defaultScriptAnalysisSettings: IScriptAnalysisSettings = {
enable: true,
settingsPath: "PSScriptAnalyzerSettings.psd1",
};
const defaultDebuggingSettings: IDebuggingSettings = {
createTemporaryIntegratedConsole: false,
};
const defaultDeveloperSettings: IDeveloperSettings = {
featureFlags: [],
// From `<root>/out/main.js` we go to the directory before <root> and
// then into the other repo.
bundledModulesPath: "../../PowerShellEditorServices/module",
editorServicesLogLevel: "Normal",
editorServicesWaitForDebugger: false,
waitForSessionFileTimeoutSeconds: 240,
};
const defaultCodeFoldingSettings: ICodeFoldingSettings = {
enable: true,
showLastLine: false,
};
const defaultCodeFormattingSettings: ICodeFormattingSettings = {
autoCorrectAliases: false,
preset: CodeFormattingPreset.Custom,
openBraceOnSameLine: true,
newLineAfterOpenBrace: true,
newLineAfterCloseBrace: true,
pipelineIndentationStyle: PipelineIndentationStyle.NoIndentation,
whitespaceBeforeOpenBrace: true,
whitespaceBeforeOpenParen: true,
whitespaceAroundOperator: true,
whitespaceAfterSeparator: true,
whitespaceBetweenParameters: false,
whitespaceInsideBrace: true,
addWhitespaceAroundPipe: true,
trimWhitespaceAroundPipe: false,
ignoreOneLineBlock: true,
alignPropertyValuePairs: true,
useConstantStrings: false,
useCorrectCasing: false,
};
const defaultStartAsLoginShellSettings: IStartAsLoginShellSettings = {
osx: true,
linux: false,
};
const defaultIntegratedConsoleSettings: IIntegratedConsoleSettings = {
showOnStartup: true,
focusConsoleOnExecute: true,
useLegacyReadLine: false,
forceClearScrollbackBuffer: false,
};
const defaultSideBarSettings: ISideBarSettings = {
CommandExplorerVisibility: true,
};
const defaultButtonSettings: IButtonSettings = {
showRunButtons: true,
showPanelMovementButtons: false
};
const defaultPesterSettings: IPesterSettings = {
useLegacyCodeLens: true,
outputVerbosity: "FromPreference",
debugOutputVerbosity: "Diagnostic",
};
const defaultNotebooksSettings: INotebooksSettings = {
saveMarkdownCellsAs: CommentType.BlockComment,
};
return {
startAutomatically:
configuration.get<boolean>("startAutomatically", true),
powerShellAdditionalExePaths:
configuration.get<IPowerShellAdditionalExePathSettings[]>("powerShellAdditionalExePaths", undefined),
powerShellDefaultVersion:
configuration.get<string>("powerShellDefaultVersion", undefined),
powerShellExePath:
configuration.get<string>("powerShellExePath", undefined),
promptToUpdatePowerShell:
configuration.get<boolean>("promptToUpdatePowerShell", true),
promptToUpdatePackageManagement:
configuration.get<boolean>("promptToUpdatePackageManagement", true),
bundledModulesPath:
"../modules", // Because the extension is always at `<root>/out/main.js`
useX86Host:
configuration.get<boolean>("useX86Host", false),
enableProfileLoading:
configuration.get<boolean>("enableProfileLoading", false),
helpCompletion:
configuration.get<string>("helpCompletion", CommentType.BlockComment),
|
developer:
getWorkspaceSettingsWithDefaults<IDeveloperSettings>(configuration, "developer", defaultDeveloperSettings),
codeFolding:
configuration.get<ICodeFoldingSettings>("codeFolding", defaultCodeFoldingSettings),
codeFormatting:
configuration.get<ICodeFormattingSettings>("codeFormatting", defaultCodeFormattingSettings),
integratedConsole:
configuration.get<IIntegratedConsoleSettings>("integratedConsole", defaultIntegratedConsoleSettings),
bugReporting:
configuration.get<IBugReportingSettings>("bugReporting", defaultBugReportingSettings),
sideBar:
configuration.get<ISideBarSettings>("sideBar", defaultSideBarSettings),
pester:
configuration.get<IPesterSettings>("pester", defaultPesterSettings),
buttons:
configuration.get<IButtonSettings>("buttons", defaultButtonSettings),
notebooks:
configuration.get<INotebooksSettings>("notebooks", defaultNotebooksSettings),
startAsLoginShell:
// tslint:disable-next-line
// We follow the same convention as VS Code - https://github.com/microsoft/vscode/blob/ff00badd955d6cfcb8eab5f25f3edc86b762f49f/src/vs/workbench/contrib/terminal/browser/terminal.contribution.ts#L105-L107
// "Unlike on Linux, ~/.profile is not sourced when logging into a macOS session. This
// is the reason terminals on macOS typically run login shells by default which set up
// the environment. See http://unix.stackexchange.com/a/119675/115410"
configuration.get<IStartAsLoginShellSettings>("startAsLoginShell", defaultStartAsLoginShellSettings),
cwd:
configuration.get<string>("cwd", null),
};
}
// Get the ConfigurationTarget (read: scope) of where the *effective* setting value comes from
export async function getEffectiveConfigurationTarget(settingName: string): Promise<vscode.ConfigurationTarget> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
const detail = configuration.inspect(settingName);
let configurationTarget = null;
if (typeof detail.workspaceFolderValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.WorkspaceFolder;
}
else if (typeof detail.workspaceValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Workspace;
}
else if (typeof detail.globalValue !== "undefined") {
configurationTarget = vscode.ConfigurationTarget.Global;
}
return configurationTarget;
}
export async function change(
settingName: string,
newValue: any,
configurationTarget?: vscode.ConfigurationTarget | boolean): Promise<void> {
const configuration = vscode.workspace.getConfiguration(utils.PowerShellLanguageId);
await configuration.update(settingName, newValue, configurationTarget);
}
function getWorkspaceSettingsWithDefaults<TSettings>(
workspaceConfiguration: vscode.WorkspaceConfiguration,
settingName: string,
defaultSettings: TSettings): TSettings {
const importedSettings: TSettings = workspaceConfiguration.get<TSettings>(settingName, defaultSettings);
for (const setting in importedSettings) {
if (importedSettings[setting]) {
defaultSettings[setting] = importedSettings[setting];
}
}
return defaultSettings;
}
|
scriptAnalysis:
configuration.get<IScriptAnalysisSettings>("scriptAnalysis", defaultScriptAnalysisSettings),
debugging:
configuration.get<IDebuggingSettings>("debugging", defaultDebuggingSettings),
|
random_line_split
|
Calculator.ts
|
/**
* A class that implements a simple Calculator
*/
export class Calculator {
/**
* Adds the provided numbers and returns the result
* @param a First number
* @param b Second number
*/
public add(a: number, b: number): number {
return a + b;
}
/**
* Subtracts second number from first number and returns the result
* @param a First number
* @param b Second number
*/
public
|
(a: number, b: number): number {
return a - b;
}
/**
* Multiplies the provided numbers and returns the result
* @param a First number
* @param b Second number
*/
public multiply(a: number, b: number): number {
return a * b;
}
/**
* Divides the first number by the second number and returns the result
* @param a First number
* @param b Second number
*/
public divide(a: number, b: number): number {
return a / b;
}
}
|
subtract
|
identifier_name
|
Calculator.ts
|
/**
* A class that implements a simple Calculator
*/
export class Calculator {
/**
* Adds the provided numbers and returns the result
* @param a First number
* @param b Second number
*/
public add(a: number, b: number): number {
|
* @param a First number
* @param b Second number
*/
public subtract(a: number, b: number): number {
return a - b;
}
/**
* Multiplies the provided numbers and returns the result
* @param a First number
* @param b Second number
*/
public multiply(a: number, b: number): number {
return a * b;
}
/**
* Divides the first number by the second number and returns the result
* @param a First number
* @param b Second number
*/
public divide(a: number, b: number): number {
return a / b;
}
}
|
return a + b;
}
/**
* Subtracts second number from first number and returns the result
|
random_line_split
|
Calculator.ts
|
/**
* A class that implements a simple Calculator
*/
export class Calculator {
/**
* Adds the provided numbers and returns the result
* @param a First number
* @param b Second number
*/
public add(a: number, b: number): number {
return a + b;
}
/**
* Subtracts second number from first number and returns the result
* @param a First number
* @param b Second number
*/
public subtract(a: number, b: number): number
|
/**
* Multiplies the provided numbers and returns the result
* @param a First number
* @param b Second number
*/
public multiply(a: number, b: number): number {
return a * b;
}
/**
* Divides the first number by the second number and returns the result
* @param a First number
* @param b Second number
*/
public divide(a: number, b: number): number {
return a / b;
}
}
|
{
return a - b;
}
|
identifier_body
|
webpack.local.config.js
|
var path = require("path")
var webpack = require('webpack')
var BundleTracker = require('webpack-bundle-tracker')
var config = require('./webpack.base.config.js')
// Use webpack dev server
config.entry = [
'webpack-dev-server/client?http://localhost:3000',
'webpack/hot/only-dev-server',
'./assets/js/index'
]
// override django's STATIC_URL for webpack bundles
config.output.publicPath = 'http://localhost:3000/assets/bundles/'
|
])
// Add a loader for JSX files with react-hot enabled
config.module.loaders.push(
{ test: /\.jsx?$/, exclude: /node_modules/, loaders: ['react-hot', 'babel'] }
)
module.exports = config
|
// Add HotModuleReplacementPlugin and BundleTracker plugins
config.plugins = config.plugins.concat([
new webpack.HotModuleReplacementPlugin(),
new webpack.NoErrorsPlugin(),
new BundleTracker({filename: './webpack-stats.json'}),
|
random_line_split
|
product_spec.js
|
(function() {
"use strict";
var image, master, product, store, variants;
module('App.Product', {
setup: function() {
Ember.run(function() {
store = dataStore();
product = store.find('product', 'some-shirt');
});
},
teardown: function() {
store = undefined;
product = undefined;
}
});
// ---------------------------------------------------------
// Relationships
// ---------------------------------------------------------
test('#master', function() {
Ember.run(function() {
master = product.get('master');
});
equal(master.get('id'), 'shirt-master');
});
test('#variants', function() {
Ember.run(function() {
variants = product.get('variants');
});
equal(variants.get('length'), 2);
});
// ---------------------------------------------------------
// Attributes
// ---------------------------------------------------------
test('#availableOn', function() {
ok(product.get('availableOn').isSame(moment('January 1, 2014', 'MMMM D, YYYY')));
});
test('#description', function() {
equal(product.get('description'), 'Some description about some shirt.');
});
test('#displayPrice', function() {
equal(product.get('price'), 24.99);
});
test('#metaDescription', function() {
equal(product.get('metaDescription'), 'This shirt is awesome!');
});
test('#metaKeywords', function() {
equal(product.get('metaKeywords'), 'some, shirt, tshirt, codelation');
});
test('#name', function() {
equal(product.get('name'), 'Some Shirt');
});
test('#permalink', function() {
|
equal(product.get('permalink'), 'some-shirt');
});
test('#price', function() {
equal(product.get('price'), 24.99);
});
// ---------------------------------------------------------
// Computed Properties
// ---------------------------------------------------------
test('#masterImage', function() {
Ember.run(function() {
image = product.get('masterImage');
});
equal(image.get('id'), 'shirt-front');
});
})();
|
random_line_split
|
|
Changelog.tsx
|
import CORE_CHANGELOG from 'CHANGELOG';
import { ChangelogEntry } from 'common/changelog';
import Contributor from 'interface/ContributorButton';
import ReadableListing from 'interface/ReadableListing';
interface Props {
changelog: ChangelogEntry[];
limit?: number;
includeCore?: boolean;
|
const mergedChangelog: ChangelogEntry[] = includeCore
? [...CORE_CHANGELOG, ...changelog].sort(
(a: ChangelogEntry, b: ChangelogEntry) => b.date.getTime() - a.date.getTime(),
)
: changelog;
return (
<ul className="list text">
{mergedChangelog
.filter((_, i) => !limit || i < limit)
.map((entry) => {
const { date, changes, contributors } = entry;
const isFromCoreChangelog = CORE_CHANGELOG.includes(entry);
// The index of the entry provides us with a unique never changing key, which speeds up the Shared Changes toggle
const index = isFromCoreChangelog
? CORE_CHANGELOG.indexOf(entry)
: changelog.indexOf(entry);
return (
<li
key={isFromCoreChangelog ? `core-${index}` : `spec-${index}`}
className={`flex wrapable ${includeCore && isFromCoreChangelog ? 'text-muted' : ''}`}
>
<div className="flex-sub" style={{ minWidth: 100, paddingRight: 15 }}>
{date.toLocaleDateString()}
</div>
<div className="flex-main" style={{ minWidth: 200 }}>
{changes}
</div>
<div
className="flex-sub"
style={{ minWidth: 150, paddingLeft: 15, textAlign: 'right' }}
>
<ReadableListing>
{contributors.map((contributor) => (
<Contributor key={contributor.nickname} {...contributor} />
))}
</ReadableListing>
</div>
</li>
);
})}
</ul>
);
};
export default Changelog;
|
}
const Changelog = ({ changelog, limit, includeCore = true }: Props) => {
|
random_line_split
|
diffie_hellman.rs
|
use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
])
});
fn powm(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while !exp.is_zero() {
if exp.is_odd()
|
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
}
|
{
result = (result * &base) % modulus;
}
|
conditional_block
|
diffie_hellman.rs
|
use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
});
fn powm(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while !exp.is_zero() {
if exp.is_odd() {
result = (result * &base) % modulus;
}
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
}
|
])
|
random_line_split
|
diffie_hellman.rs
|
use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
])
});
fn
|
(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while !exp.is_zero() {
if exp.is_odd() {
result = (result * &base) % modulus;
}
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
}
|
powm
|
identifier_name
|
diffie_hellman.rs
|
use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
])
});
fn powm(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while !exp.is_zero() {
if exp.is_odd() {
result = (result * &base) % modulus;
}
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8>
|
}
|
{
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
|
identifier_body
|
dnc.py
|
#! /usr/bin/env python
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia [email protected]
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# CHANGELOG
# 0.6
# - Added some more chars to the command injection prevention.
# - Clients decide the nmap scanning rate.
# - If the server sends a --min-rate parameter, we now delete it. WE control the scan speed.
# - Clients decide the nmap scanning rate.
# - Exit if nmap is not installed
# - Stop sending the euid, it was a privacy violation. Now we just say if we are root or not.
#
# TODO
# - privileges on nmap
#
try:
from OpenSSL import SSL
except:
print 'You need openssl libs for python. apt-get install python-openssl'
exit(-1)
import sys
try:
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import ssl, reactor
except:
print 'You need twisted libs for python. apt-get install python-twisted'
exit(-1)
import time, getopt, shlex
from subprocess import Popen
from subprocess import PIPE
import os
import random
# Global variables
server_ip = False
server_port = 46001
vernum = '0.6'
# Your name alias defaults to anonymous
alias='Anonymous'
debug=False
# Do not use a max rate by default
maxrate = False
# End global variables
# Print version information and exit
def version():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print
# Print help information and exit:
def usage():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print "\nusage: %s <options>" % sys.argv[0]
print "options:"
print " -s, --server-ip IP address of dnmap server."
print " -p, --server-port Port of dnmap server. Dnmap port defaults to 46001"
print " -a, --alias Your name alias so we can give credit to you for your help. Optional"
print " -d, --debug Debuging."
print " -m, --max-rate Force nmaps commands to use at most this rate. Useful to slow nmap down. Adds the --max-rate parameter."
print
sys.exit(1)
def check_clean(line):
global debug
try:
outbound_chars = [';', '#', '`']
ret = True
for char in outbound_chars:
if char in line:
ret = False
return ret
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
class NmapClient(LineReceiver):
def connectionMade(self):
global client_id
global alias
global debug
print 'Client connected succesfully...'
print 'Waiting for more commands....'
if debug:
print ' -- Your client ID is: {0} , and your alias is: {1}'.format(str(client_id), str(alias))
euid = os.geteuid()
# Do not send the euid, just tell if we are root or not.
if euid==0:
# True
iamroot = 1
else:
# False
iamroot = 0
# 'Client ID' text must be sent to receive another command
line = 'Starts the Client ID:{0}:Alias:{1}:Version:{2}:ImRoot:{3}'.format(str(client_id),str(alias),vernum,iamroot)
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
#line = 'Send more commands to Client ID:{0}:Alias:{1}:\0'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
def dataReceived(self, line):
global debug
global client_id
global alias
# If a wait is received. just wait.
if 'Wait' in line:
sleeptime = int(line.split(':')[1])
time.sleep(sleeptime)
# Ask for more
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# dataReceived does not wait for end of lines or CR nor LF
if debug:
print "\tCommand Received: {0}".format(line.strip('\n').strip('\r'))
# A little bit of protection from the server
if check_clean(line):
# Store the nmap output file so we can send it to the server later
try:
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
except IndexError:
random_file_name = str(random.randrange(0, 100000000, 1))
print '+ No -oA given. We add it anyway so not to lose the results. Added -oA ' + random_file_name
line = line + '-oA ' + random_file_name
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
try:
nmap_returncode = -1
# Check for rate commands
# Verfiy that the server is NOT trying to force us to be faster. NMAP PARAMETER DEPENDACE
if 'min-rate' in line:
temp_vect = shlex.split(line)
word_index = temp_vect.index('--min-rate')
# Just delete the --min-rate parameter with its value
nmap_command = temp_vect[0:word_index] + temp_vect[word_index + 1:]
else:
nmap_command = shlex.split(line)
# Do we have to add a max-rate parameter?
if maxrate:
nmap_command.append('--max-rate')
nmap_command.append(str((maxrate)))
# Strip the command, so we can controll that only nmap is executed really
nmap_command = nmap_command[1:]
nmap_command.insert(0, 'nmap')
# Recreate the final command to show it
nmap_command_string = ''
for i in nmap_command:
|
nmap_process = Popen(nmap_command, stdout=PIPE)
raw_nmap_output = nmap_process.communicate()[0]
nmap_returncode = nmap_process.returncode
except OSError:
print 'You don\'t have nmap installed. You can install it with apt-get install nmap'
exit(-1)
except ValueError:
raw_nmap_output = 'Invalid nmap arguments.'
print raw_nmap_output
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
if nmap_returncode >= 0:
# Nmap ended ok and the files were created
if os.path.isfile(nmap_output_file + ".xml") and os.path.isfile(
nmap_output_file + ".gnmap") and os.path.isfile(nmap_output_file + ".nmap"):
with open(nmap_output_file + ".xml", "r") as f:
XMLData = f.read()
with open(nmap_output_file + ".gnmap", "r") as f:
GNmapData = f.read()
with open(nmap_output_file + ".nmap", "r") as f:
NmapData = f.read()
xml_linesep = "\r\n#XMLOUTPUT#\r\n"
gnmap_linesep = "\r\n#GNMAPOUTPUT#\r\n"
# Tell the server that we are sending the nmap output
print '\tSending output to the server...'
line = 'Nmap Output File:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
line = NmapData + xml_linesep + XMLData + gnmap_linesep + GNmapData
# line = raw_nmap_output + xml_linesep + XMLData + gnmap_linesep + GNmapData
print 'GNmapData: {}'.format(len(NmapData))
print 'xml_linesep: {}'.format(len(xml_linesep))
print 'XMLData: {}'.format(len(XMLData))
print 'gnmap_linesep: {}'.format(len(gnmap_linesep))
print 'GNmapData: {}'.format(len(GNmapData))
self.sendLine(line)
if debug:
print ' -- Line sent: {0}'.format(line)
line = 'Nmap Output Finished:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
# Move nmap output files to its directory
os.system('mv *.nmap nmap_output > /dev/null 2>&1')
os.system('mv *.gnmap nmap_output > /dev/null 2>&1')
os.system('mv *.xml nmap_output > /dev/null 2>&1')
# Ask for another command.
# 'Client ID' text must be sent to receive another command
print 'Waiting for more commands....'
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# Something strange was sent to us...
print
print 'WARNING! Ignoring some strange command was sent to us: {0}'.format(line)
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
class NmapClientFactory(ReconnectingClientFactory):
try:
protocol = NmapClient
def startedConnecting(self, connector):
print 'Starting connection...'
def clientConnectionFailed(self, connector, reason):
print 'Connection failed:', reason.getErrorMessage()
# Try to reconnect
print 'Trying to reconnect. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionLost(self, connector, reason):
print 'Connection lost. Reason: {0}'.format(reason.getErrorMessage())
# Try to reconnect
print 'Trying to reconnect in 10 secs. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
except Exception as inst:
print 'Problem in NmapClientFactory'
print type(inst)
print inst.args
print inst
def process_commands():
global server_ip
global server_port
global client_id
global factory
try:
print 'Client Started...'
# Generate the client unique ID
client_id = str(random.randrange(0, 100000000, 1))
# Create the output directory
print 'Nmap output files stored in \'nmap_output\' directory...'
os.system('mkdir nmap_output > /dev/null 2>&1')
factory = NmapClientFactory()
# Do not wait more that 10 seconds between reconnections
factory.maxDelay = 2
reactor.connectSSL(str(server_ip), int(server_port), factory, ssl.ClientContextFactory())
#reactor.addSystemEventTrigger('before','shutdown',myCleanUpFunction)
reactor.run()
except Exception as inst:
print 'Problem in process_commands function'
print type(inst)
print inst.args
print inst
def main():
global server_ip
global server_port
global alias
global debug
global maxrate
try:
opts, args = getopt.getopt(sys.argv[1:], "a:dm:p:s:", ["server-ip=","server-port","max-rate","alias=","debug"])
except getopt.GetoptError: usage()
for opt, arg in opts:
if opt in ("-s", "--server-ip"): server_ip=str(arg)
if opt in ("-p", "--server-port"): server_port=arg
if opt in ("-a", "--alias"): alias=str(arg).strip('\n').strip('\r').strip(' ')
if opt in ("-d", "--debug"): debug=True
if opt in ("-m", "--max-rate"): maxrate=str(arg)
try:
if server_ip and server_port:
version()
# Start connecting
process_commands()
else:
usage()
except KeyboardInterrupt:
# CTRL-C pretty handling.
print "Keyboard Interruption!. Exiting."
sys.exit(1)
if __name__ == '__main__':
main()
|
nmap_command_string = nmap_command_string + i + ' '
print "\tCommand Executed: {0}".format(nmap_command_string)
# For some reason this executable thing does not work! seems to change nmap sP for sS
# nmap_process = Popen(nmap_command,executable='nmap',stdout=PIPE)
|
random_line_split
|
dnc.py
|
#! /usr/bin/env python
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia [email protected]
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# CHANGELOG
# 0.6
# - Added some more chars to the command injection prevention.
# - Clients decide the nmap scanning rate.
# - If the server sends a --min-rate parameter, we now delete it. WE control the scan speed.
# - Clients decide the nmap scanning rate.
# - Exit if nmap is not installed
# - Stop sending the euid, it was a privacy violation. Now we just say if we are root or not.
#
# TODO
# - privileges on nmap
#
try:
from OpenSSL import SSL
except:
print 'You need openssl libs for python. apt-get install python-openssl'
exit(-1)
import sys
try:
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import ssl, reactor
except:
print 'You need twisted libs for python. apt-get install python-twisted'
exit(-1)
import time, getopt, shlex
from subprocess import Popen
from subprocess import PIPE
import os
import random
# Global variables
server_ip = False
server_port = 46001
vernum = '0.6'
# Your name alias defaults to anonymous
alias='Anonymous'
debug=False
# Do not use a max rate by default
maxrate = False
# End global variables
# Print version information and exit
def version():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print
# Print help information and exit:
def usage():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print "\nusage: %s <options>" % sys.argv[0]
print "options:"
print " -s, --server-ip IP address of dnmap server."
print " -p, --server-port Port of dnmap server. Dnmap port defaults to 46001"
print " -a, --alias Your name alias so we can give credit to you for your help. Optional"
print " -d, --debug Debuging."
print " -m, --max-rate Force nmaps commands to use at most this rate. Useful to slow nmap down. Adds the --max-rate parameter."
print
sys.exit(1)
def check_clean(line):
global debug
try:
outbound_chars = [';', '#', '`']
ret = True
for char in outbound_chars:
if char in line:
ret = False
return ret
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
class NmapClient(LineReceiver):
def connectionMade(self):
global client_id
global alias
global debug
print 'Client connected succesfully...'
print 'Waiting for more commands....'
if debug:
print ' -- Your client ID is: {0} , and your alias is: {1}'.format(str(client_id), str(alias))
euid = os.geteuid()
# Do not send the euid, just tell if we are root or not.
if euid==0:
# True
iamroot = 1
else:
# False
iamroot = 0
# 'Client ID' text must be sent to receive another command
line = 'Starts the Client ID:{0}:Alias:{1}:Version:{2}:ImRoot:{3}'.format(str(client_id),str(alias),vernum,iamroot)
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
#line = 'Send more commands to Client ID:{0}:Alias:{1}:\0'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
def dataReceived(self, line):
global debug
global client_id
global alias
# If a wait is received. just wait.
if 'Wait' in line:
sleeptime = int(line.split(':')[1])
time.sleep(sleeptime)
# Ask for more
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# dataReceived does not wait for end of lines or CR nor LF
if debug:
print "\tCommand Received: {0}".format(line.strip('\n').strip('\r'))
# A little bit of protection from the server
if check_clean(line):
# Store the nmap output file so we can send it to the server later
try:
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
except IndexError:
random_file_name = str(random.randrange(0, 100000000, 1))
print '+ No -oA given. We add it anyway so not to lose the results. Added -oA ' + random_file_name
line = line + '-oA ' + random_file_name
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
try:
nmap_returncode = -1
# Check for rate commands
# Verfiy that the server is NOT trying to force us to be faster. NMAP PARAMETER DEPENDACE
if 'min-rate' in line:
temp_vect = shlex.split(line)
word_index = temp_vect.index('--min-rate')
# Just delete the --min-rate parameter with its value
nmap_command = temp_vect[0:word_index] + temp_vect[word_index + 1:]
else:
nmap_command = shlex.split(line)
# Do we have to add a max-rate parameter?
if maxrate:
nmap_command.append('--max-rate')
nmap_command.append(str((maxrate)))
# Strip the command, so we can controll that only nmap is executed really
nmap_command = nmap_command[1:]
nmap_command.insert(0, 'nmap')
# Recreate the final command to show it
nmap_command_string = ''
for i in nmap_command:
nmap_command_string = nmap_command_string + i + ' '
print "\tCommand Executed: {0}".format(nmap_command_string)
# For some reason this executable thing does not work! seems to change nmap sP for sS
# nmap_process = Popen(nmap_command,executable='nmap',stdout=PIPE)
nmap_process = Popen(nmap_command, stdout=PIPE)
raw_nmap_output = nmap_process.communicate()[0]
nmap_returncode = nmap_process.returncode
except OSError:
print 'You don\'t have nmap installed. You can install it with apt-get install nmap'
exit(-1)
except ValueError:
raw_nmap_output = 'Invalid nmap arguments.'
print raw_nmap_output
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
if nmap_returncode >= 0:
# Nmap ended ok and the files were created
if os.path.isfile(nmap_output_file + ".xml") and os.path.isfile(
nmap_output_file + ".gnmap") and os.path.isfile(nmap_output_file + ".nmap"):
with open(nmap_output_file + ".xml", "r") as f:
XMLData = f.read()
with open(nmap_output_file + ".gnmap", "r") as f:
GNmapData = f.read()
with open(nmap_output_file + ".nmap", "r") as f:
NmapData = f.read()
xml_linesep = "\r\n#XMLOUTPUT#\r\n"
gnmap_linesep = "\r\n#GNMAPOUTPUT#\r\n"
# Tell the server that we are sending the nmap output
print '\tSending output to the server...'
line = 'Nmap Output File:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
line = NmapData + xml_linesep + XMLData + gnmap_linesep + GNmapData
# line = raw_nmap_output + xml_linesep + XMLData + gnmap_linesep + GNmapData
print 'GNmapData: {}'.format(len(NmapData))
print 'xml_linesep: {}'.format(len(xml_linesep))
print 'XMLData: {}'.format(len(XMLData))
print 'gnmap_linesep: {}'.format(len(gnmap_linesep))
print 'GNmapData: {}'.format(len(GNmapData))
self.sendLine(line)
if debug:
print ' -- Line sent: {0}'.format(line)
line = 'Nmap Output Finished:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
# Move nmap output files to its directory
os.system('mv *.nmap nmap_output > /dev/null 2>&1')
os.system('mv *.gnmap nmap_output > /dev/null 2>&1')
os.system('mv *.xml nmap_output > /dev/null 2>&1')
# Ask for another command.
# 'Client ID' text must be sent to receive another command
print 'Waiting for more commands....'
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# Something strange was sent to us...
print
print 'WARNING! Ignoring some strange command was sent to us: {0}'.format(line)
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
class NmapClientFactory(ReconnectingClientFactory):
try:
protocol = NmapClient
def startedConnecting(self, connector):
print 'Starting connection...'
def clientConnectionFailed(self, connector, reason):
print 'Connection failed:', reason.getErrorMessage()
# Try to reconnect
print 'Trying to reconnect. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionLost(self, connector, reason):
print 'Connection lost. Reason: {0}'.format(reason.getErrorMessage())
# Try to reconnect
print 'Trying to reconnect in 10 secs. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
except Exception as inst:
print 'Problem in NmapClientFactory'
print type(inst)
print inst.args
print inst
def process_commands():
global server_ip
global server_port
global client_id
global factory
try:
print 'Client Started...'
# Generate the client unique ID
client_id = str(random.randrange(0, 100000000, 1))
# Create the output directory
print 'Nmap output files stored in \'nmap_output\' directory...'
os.system('mkdir nmap_output > /dev/null 2>&1')
factory = NmapClientFactory()
# Do not wait more that 10 seconds between reconnections
factory.maxDelay = 2
reactor.connectSSL(str(server_ip), int(server_port), factory, ssl.ClientContextFactory())
#reactor.addSystemEventTrigger('before','shutdown',myCleanUpFunction)
reactor.run()
except Exception as inst:
print 'Problem in process_commands function'
print type(inst)
print inst.args
print inst
def main():
global server_ip
global server_port
global alias
global debug
global maxrate
try:
opts, args = getopt.getopt(sys.argv[1:], "a:dm:p:s:", ["server-ip=","server-port","max-rate","alias=","debug"])
except getopt.GetoptError: usage()
for opt, arg in opts:
|
try:
if server_ip and server_port:
version()
# Start connecting
process_commands()
else:
usage()
except KeyboardInterrupt:
# CTRL-C pretty handling.
print "Keyboard Interruption!. Exiting."
sys.exit(1)
if __name__ == '__main__':
main()
|
if opt in ("-s", "--server-ip"): server_ip=str(arg)
if opt in ("-p", "--server-port"): server_port=arg
if opt in ("-a", "--alias"): alias=str(arg).strip('\n').strip('\r').strip(' ')
if opt in ("-d", "--debug"): debug=True
if opt in ("-m", "--max-rate"): maxrate=str(arg)
|
conditional_block
|
dnc.py
|
#! /usr/bin/env python
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia [email protected]
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# CHANGELOG
# 0.6
# - Added some more chars to the command injection prevention.
# - Clients decide the nmap scanning rate.
# - If the server sends a --min-rate parameter, we now delete it. WE control the scan speed.
# - Clients decide the nmap scanning rate.
# - Exit if nmap is not installed
# - Stop sending the euid, it was a privacy violation. Now we just say if we are root or not.
#
# TODO
# - privileges on nmap
#
try:
from OpenSSL import SSL
except:
print 'You need openssl libs for python. apt-get install python-openssl'
exit(-1)
import sys
try:
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import ssl, reactor
except:
print 'You need twisted libs for python. apt-get install python-twisted'
exit(-1)
import time, getopt, shlex
from subprocess import Popen
from subprocess import PIPE
import os
import random
# Global variables
server_ip = False
server_port = 46001
vernum = '0.6'
# Your name alias defaults to anonymous
alias='Anonymous'
debug=False
# Do not use a max rate by default
maxrate = False
# End global variables
# Print version information and exit
def version():
|
# Print help information and exit:
def usage():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print "\nusage: %s <options>" % sys.argv[0]
print "options:"
print " -s, --server-ip IP address of dnmap server."
print " -p, --server-port Port of dnmap server. Dnmap port defaults to 46001"
print " -a, --alias Your name alias so we can give credit to you for your help. Optional"
print " -d, --debug Debuging."
print " -m, --max-rate Force nmaps commands to use at most this rate. Useful to slow nmap down. Adds the --max-rate parameter."
print
sys.exit(1)
def check_clean(line):
global debug
try:
outbound_chars = [';', '#', '`']
ret = True
for char in outbound_chars:
if char in line:
ret = False
return ret
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
class NmapClient(LineReceiver):
def connectionMade(self):
global client_id
global alias
global debug
print 'Client connected succesfully...'
print 'Waiting for more commands....'
if debug:
print ' -- Your client ID is: {0} , and your alias is: {1}'.format(str(client_id), str(alias))
euid = os.geteuid()
# Do not send the euid, just tell if we are root or not.
if euid==0:
# True
iamroot = 1
else:
# False
iamroot = 0
# 'Client ID' text must be sent to receive another command
line = 'Starts the Client ID:{0}:Alias:{1}:Version:{2}:ImRoot:{3}'.format(str(client_id),str(alias),vernum,iamroot)
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
#line = 'Send more commands to Client ID:{0}:Alias:{1}:\0'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
def dataReceived(self, line):
global debug
global client_id
global alias
# If a wait is received. just wait.
if 'Wait' in line:
sleeptime = int(line.split(':')[1])
time.sleep(sleeptime)
# Ask for more
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# dataReceived does not wait for end of lines or CR nor LF
if debug:
print "\tCommand Received: {0}".format(line.strip('\n').strip('\r'))
# A little bit of protection from the server
if check_clean(line):
# Store the nmap output file so we can send it to the server later
try:
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
except IndexError:
random_file_name = str(random.randrange(0, 100000000, 1))
print '+ No -oA given. We add it anyway so not to lose the results. Added -oA ' + random_file_name
line = line + '-oA ' + random_file_name
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
try:
nmap_returncode = -1
# Check for rate commands
# Verfiy that the server is NOT trying to force us to be faster. NMAP PARAMETER DEPENDACE
if 'min-rate' in line:
temp_vect = shlex.split(line)
word_index = temp_vect.index('--min-rate')
# Just delete the --min-rate parameter with its value
nmap_command = temp_vect[0:word_index] + temp_vect[word_index + 1:]
else:
nmap_command = shlex.split(line)
# Do we have to add a max-rate parameter?
if maxrate:
nmap_command.append('--max-rate')
nmap_command.append(str((maxrate)))
# Strip the command, so we can controll that only nmap is executed really
nmap_command = nmap_command[1:]
nmap_command.insert(0, 'nmap')
# Recreate the final command to show it
nmap_command_string = ''
for i in nmap_command:
nmap_command_string = nmap_command_string + i + ' '
print "\tCommand Executed: {0}".format(nmap_command_string)
# For some reason this executable thing does not work! seems to change nmap sP for sS
# nmap_process = Popen(nmap_command,executable='nmap',stdout=PIPE)
nmap_process = Popen(nmap_command, stdout=PIPE)
raw_nmap_output = nmap_process.communicate()[0]
nmap_returncode = nmap_process.returncode
except OSError:
print 'You don\'t have nmap installed. You can install it with apt-get install nmap'
exit(-1)
except ValueError:
raw_nmap_output = 'Invalid nmap arguments.'
print raw_nmap_output
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
if nmap_returncode >= 0:
# Nmap ended ok and the files were created
if os.path.isfile(nmap_output_file + ".xml") and os.path.isfile(
nmap_output_file + ".gnmap") and os.path.isfile(nmap_output_file + ".nmap"):
with open(nmap_output_file + ".xml", "r") as f:
XMLData = f.read()
with open(nmap_output_file + ".gnmap", "r") as f:
GNmapData = f.read()
with open(nmap_output_file + ".nmap", "r") as f:
NmapData = f.read()
xml_linesep = "\r\n#XMLOUTPUT#\r\n"
gnmap_linesep = "\r\n#GNMAPOUTPUT#\r\n"
# Tell the server that we are sending the nmap output
print '\tSending output to the server...'
line = 'Nmap Output File:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
line = NmapData + xml_linesep + XMLData + gnmap_linesep + GNmapData
# line = raw_nmap_output + xml_linesep + XMLData + gnmap_linesep + GNmapData
print 'GNmapData: {}'.format(len(NmapData))
print 'xml_linesep: {}'.format(len(xml_linesep))
print 'XMLData: {}'.format(len(XMLData))
print 'gnmap_linesep: {}'.format(len(gnmap_linesep))
print 'GNmapData: {}'.format(len(GNmapData))
self.sendLine(line)
if debug:
print ' -- Line sent: {0}'.format(line)
line = 'Nmap Output Finished:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
# Move nmap output files to its directory
os.system('mv *.nmap nmap_output > /dev/null 2>&1')
os.system('mv *.gnmap nmap_output > /dev/null 2>&1')
os.system('mv *.xml nmap_output > /dev/null 2>&1')
# Ask for another command.
# 'Client ID' text must be sent to receive another command
print 'Waiting for more commands....'
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# Something strange was sent to us...
print
print 'WARNING! Ignoring some strange command was sent to us: {0}'.format(line)
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
class NmapClientFactory(ReconnectingClientFactory):
try:
protocol = NmapClient
def startedConnecting(self, connector):
print 'Starting connection...'
def clientConnectionFailed(self, connector, reason):
print 'Connection failed:', reason.getErrorMessage()
# Try to reconnect
print 'Trying to reconnect. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionLost(self, connector, reason):
print 'Connection lost. Reason: {0}'.format(reason.getErrorMessage())
# Try to reconnect
print 'Trying to reconnect in 10 secs. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
except Exception as inst:
print 'Problem in NmapClientFactory'
print type(inst)
print inst.args
print inst
def process_commands():
global server_ip
global server_port
global client_id
global factory
try:
print 'Client Started...'
# Generate the client unique ID
client_id = str(random.randrange(0, 100000000, 1))
# Create the output directory
print 'Nmap output files stored in \'nmap_output\' directory...'
os.system('mkdir nmap_output > /dev/null 2>&1')
factory = NmapClientFactory()
# Do not wait more that 10 seconds between reconnections
factory.maxDelay = 2
reactor.connectSSL(str(server_ip), int(server_port), factory, ssl.ClientContextFactory())
#reactor.addSystemEventTrigger('before','shutdown',myCleanUpFunction)
reactor.run()
except Exception as inst:
print 'Problem in process_commands function'
print type(inst)
print inst.args
print inst
def main():
global server_ip
global server_port
global alias
global debug
global maxrate
try:
opts, args = getopt.getopt(sys.argv[1:], "a:dm:p:s:", ["server-ip=","server-port","max-rate","alias=","debug"])
except getopt.GetoptError: usage()
for opt, arg in opts:
if opt in ("-s", "--server-ip"): server_ip=str(arg)
if opt in ("-p", "--server-port"): server_port=arg
if opt in ("-a", "--alias"): alias=str(arg).strip('\n').strip('\r').strip(' ')
if opt in ("-d", "--debug"): debug=True
if opt in ("-m", "--max-rate"): maxrate=str(arg)
try:
if server_ip and server_port:
version()
# Start connecting
process_commands()
else:
usage()
except KeyboardInterrupt:
# CTRL-C pretty handling.
print "Keyboard Interruption!. Exiting."
sys.exit(1)
if __name__ == '__main__':
main()
|
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print
|
identifier_body
|
dnc.py
|
#! /usr/bin/env python
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia [email protected]
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# CHANGELOG
# 0.6
# - Added some more chars to the command injection prevention.
# - Clients decide the nmap scanning rate.
# - If the server sends a --min-rate parameter, we now delete it. WE control the scan speed.
# - Clients decide the nmap scanning rate.
# - Exit if nmap is not installed
# - Stop sending the euid, it was a privacy violation. Now we just say if we are root or not.
#
# TODO
# - privileges on nmap
#
try:
from OpenSSL import SSL
except:
print 'You need openssl libs for python. apt-get install python-openssl'
exit(-1)
import sys
try:
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import ssl, reactor
except:
print 'You need twisted libs for python. apt-get install python-twisted'
exit(-1)
import time, getopt, shlex
from subprocess import Popen
from subprocess import PIPE
import os
import random
# Global variables
server_ip = False
server_port = 46001
vernum = '0.6'
# Your name alias defaults to anonymous
alias='Anonymous'
debug=False
# Do not use a max rate by default
maxrate = False
# End global variables
# Print version information and exit
def version():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print
# Print help information and exit:
def
|
():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print "\nusage: %s <options>" % sys.argv[0]
print "options:"
print " -s, --server-ip IP address of dnmap server."
print " -p, --server-port Port of dnmap server. Dnmap port defaults to 46001"
print " -a, --alias Your name alias so we can give credit to you for your help. Optional"
print " -d, --debug Debuging."
print " -m, --max-rate Force nmaps commands to use at most this rate. Useful to slow nmap down. Adds the --max-rate parameter."
print
sys.exit(1)
def check_clean(line):
global debug
try:
outbound_chars = [';', '#', '`']
ret = True
for char in outbound_chars:
if char in line:
ret = False
return ret
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
class NmapClient(LineReceiver):
def connectionMade(self):
global client_id
global alias
global debug
print 'Client connected succesfully...'
print 'Waiting for more commands....'
if debug:
print ' -- Your client ID is: {0} , and your alias is: {1}'.format(str(client_id), str(alias))
euid = os.geteuid()
# Do not send the euid, just tell if we are root or not.
if euid==0:
# True
iamroot = 1
else:
# False
iamroot = 0
# 'Client ID' text must be sent to receive another command
line = 'Starts the Client ID:{0}:Alias:{1}:Version:{2}:ImRoot:{3}'.format(str(client_id),str(alias),vernum,iamroot)
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
#line = 'Send more commands to Client ID:{0}:Alias:{1}:\0'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
def dataReceived(self, line):
global debug
global client_id
global alias
# If a wait is received. just wait.
if 'Wait' in line:
sleeptime = int(line.split(':')[1])
time.sleep(sleeptime)
# Ask for more
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# dataReceived does not wait for end of lines or CR nor LF
if debug:
print "\tCommand Received: {0}".format(line.strip('\n').strip('\r'))
# A little bit of protection from the server
if check_clean(line):
# Store the nmap output file so we can send it to the server later
try:
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
except IndexError:
random_file_name = str(random.randrange(0, 100000000, 1))
print '+ No -oA given. We add it anyway so not to lose the results. Added -oA ' + random_file_name
line = line + '-oA ' + random_file_name
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
try:
nmap_returncode = -1
# Check for rate commands
# Verfiy that the server is NOT trying to force us to be faster. NMAP PARAMETER DEPENDACE
if 'min-rate' in line:
temp_vect = shlex.split(line)
word_index = temp_vect.index('--min-rate')
# Just delete the --min-rate parameter with its value
nmap_command = temp_vect[0:word_index] + temp_vect[word_index + 1:]
else:
nmap_command = shlex.split(line)
# Do we have to add a max-rate parameter?
if maxrate:
nmap_command.append('--max-rate')
nmap_command.append(str((maxrate)))
# Strip the command, so we can controll that only nmap is executed really
nmap_command = nmap_command[1:]
nmap_command.insert(0, 'nmap')
# Recreate the final command to show it
nmap_command_string = ''
for i in nmap_command:
nmap_command_string = nmap_command_string + i + ' '
print "\tCommand Executed: {0}".format(nmap_command_string)
# For some reason this executable thing does not work! seems to change nmap sP for sS
# nmap_process = Popen(nmap_command,executable='nmap',stdout=PIPE)
nmap_process = Popen(nmap_command, stdout=PIPE)
raw_nmap_output = nmap_process.communicate()[0]
nmap_returncode = nmap_process.returncode
except OSError:
print 'You don\'t have nmap installed. You can install it with apt-get install nmap'
exit(-1)
except ValueError:
raw_nmap_output = 'Invalid nmap arguments.'
print raw_nmap_output
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
if nmap_returncode >= 0:
# Nmap ended ok and the files were created
if os.path.isfile(nmap_output_file + ".xml") and os.path.isfile(
nmap_output_file + ".gnmap") and os.path.isfile(nmap_output_file + ".nmap"):
with open(nmap_output_file + ".xml", "r") as f:
XMLData = f.read()
with open(nmap_output_file + ".gnmap", "r") as f:
GNmapData = f.read()
with open(nmap_output_file + ".nmap", "r") as f:
NmapData = f.read()
xml_linesep = "\r\n#XMLOUTPUT#\r\n"
gnmap_linesep = "\r\n#GNMAPOUTPUT#\r\n"
# Tell the server that we are sending the nmap output
print '\tSending output to the server...'
line = 'Nmap Output File:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
line = NmapData + xml_linesep + XMLData + gnmap_linesep + GNmapData
# line = raw_nmap_output + xml_linesep + XMLData + gnmap_linesep + GNmapData
print 'GNmapData: {}'.format(len(NmapData))
print 'xml_linesep: {}'.format(len(xml_linesep))
print 'XMLData: {}'.format(len(XMLData))
print 'gnmap_linesep: {}'.format(len(gnmap_linesep))
print 'GNmapData: {}'.format(len(GNmapData))
self.sendLine(line)
if debug:
print ' -- Line sent: {0}'.format(line)
line = 'Nmap Output Finished:{0}:'.format(nmap_output_file.strip('\n').strip('\r'))
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
# Move nmap output files to its directory
os.system('mv *.nmap nmap_output > /dev/null 2>&1')
os.system('mv *.gnmap nmap_output > /dev/null 2>&1')
os.system('mv *.xml nmap_output > /dev/null 2>&1')
# Ask for another command.
# 'Client ID' text must be sent to receive another command
print 'Waiting for more commands....'
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# Something strange was sent to us...
print
print 'WARNING! Ignoring some strange command was sent to us: {0}'.format(line)
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
class NmapClientFactory(ReconnectingClientFactory):
try:
protocol = NmapClient
def startedConnecting(self, connector):
print 'Starting connection...'
def clientConnectionFailed(self, connector, reason):
print 'Connection failed:', reason.getErrorMessage()
# Try to reconnect
print 'Trying to reconnect. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionLost(self, connector, reason):
print 'Connection lost. Reason: {0}'.format(reason.getErrorMessage())
# Try to reconnect
print 'Trying to reconnect in 10 secs. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
except Exception as inst:
print 'Problem in NmapClientFactory'
print type(inst)
print inst.args
print inst
def process_commands():
global server_ip
global server_port
global client_id
global factory
try:
print 'Client Started...'
# Generate the client unique ID
client_id = str(random.randrange(0, 100000000, 1))
# Create the output directory
print 'Nmap output files stored in \'nmap_output\' directory...'
os.system('mkdir nmap_output > /dev/null 2>&1')
factory = NmapClientFactory()
# Do not wait more that 10 seconds between reconnections
factory.maxDelay = 2
reactor.connectSSL(str(server_ip), int(server_port), factory, ssl.ClientContextFactory())
#reactor.addSystemEventTrigger('before','shutdown',myCleanUpFunction)
reactor.run()
except Exception as inst:
print 'Problem in process_commands function'
print type(inst)
print inst.args
print inst
def main():
global server_ip
global server_port
global alias
global debug
global maxrate
try:
opts, args = getopt.getopt(sys.argv[1:], "a:dm:p:s:", ["server-ip=","server-port","max-rate","alias=","debug"])
except getopt.GetoptError: usage()
for opt, arg in opts:
if opt in ("-s", "--server-ip"): server_ip=str(arg)
if opt in ("-p", "--server-port"): server_port=arg
if opt in ("-a", "--alias"): alias=str(arg).strip('\n').strip('\r').strip(' ')
if opt in ("-d", "--debug"): debug=True
if opt in ("-m", "--max-rate"): maxrate=str(arg)
try:
if server_ip and server_port:
version()
# Start connecting
process_commands()
else:
usage()
except KeyboardInterrupt:
# CTRL-C pretty handling.
print "Keyboard Interruption!. Exiting."
sys.exit(1)
if __name__ == '__main__':
main()
|
usage
|
identifier_name
|
policytree.py
|
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
objStack = []
def createAttribute(s, loc, toks):
if toks[0] == '!':
newtoks = ""
for i in toks:
newtoks += i
return BinNode(newtoks)
return BinNode(toks[0]) # create
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
print("print: %s" % toks)
return BinNode(toks[0])
def printStuff(s, loc, toks):
print("print: %s" % toks)
return toks
def pushFirst( s, loc, toks ):
objStack.append( toks[0] )
def createTree(op, node1, node2):
if(op == "or"):
node = BinNode(OpType.OR)
elif(op == "and"):
node = BinNode(OpType.AND)
else:
return None
node.addSubNode(node1, node2)
return node
class PolicyParser:
def __init__(self, verbose=False):
self.finalPol = self.getBNF()
self.verbose = verbose
def getBNF(self):
# supported operators => (OR, AND, <
OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
Operator = OperatorAND | OperatorOR
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
# describes an individual leaf node
leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
# describes expressions such as (attr < value)
leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
# describes the node concept
node = leafConditional | leafNode
expr = Forward()
term = Forward()
atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
finalPol = expr#.setParseAction( printStuff )
return finalPol
def evalStack(self, stack):
op = stack.pop()
if op in ["or", "and"]:
op2 = self.evalStack(stack)
op1 = self.evalStack(stack)
return createTree(op, op1, op2)
else:
# Node value (attribute)
return op
def parse(self, string):
global objStack
del objStack[:]
self.finalPol.parseString(string)
return self.evalStack(objStack)
def findDuplicates(self, tree, _dict):
|
def labelDuplicates(self, tree, _dictLabel):
if tree.left: self.labelDuplicates(tree.left, _dictLabel)
if tree.right: self.labelDuplicates(tree.right, _dictLabel)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dictLabel.get(key) != None:
tree.index = _dictLabel[ key ]
_dictLabel[ key ] += 1
def prune(self, tree, attributes):
"""given policy tree and attributes, determine whether the attributes satisfy the policy.
if not enough attributes to satisfy policy, return None otherwise, a pruned list of
attributes to potentially recover the associated secret.
"""
(policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
# print("pruned attrs: ", prunedList)
# if prunedList:
# for i in prunedList:
# print("node: ", i)
if not policySatisfied:
return policySatisfied
return prunedList
def requiredAttributes(self, tree, attrList):
""" determines the required attributes to satisfy policy tree and returns a list of BinNode
objects."""
if tree == None: return 0
Left = tree.getLeft()
Right = tree.getRight()
if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
if(tree.getNodeType() == OpType.OR):
# never return both attributes, basically the first one that matches from left to right
if resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft or resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
if(tree.getNodeType() == OpType.AND):
if resultLeft and resultRight: sendThis = leftAttr + rightAttr
elif resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft and resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
elif(tree.getNodeType() == OpType.ATTR):
if(tree.getAttribute() in attrList):
return (True, [tree])
else:
return (False, None)
return
if __name__ == "__main__":
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
tree1 = parser.parse("(1 or 2) and (2 and 3))")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
|
if tree.left: self.findDuplicates(tree.left, _dict)
if tree.right: self.findDuplicates(tree.right, _dict)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dict.get(key) == None: _dict[ key ] = 1
else: _dict[ key ] += 1
|
identifier_body
|
policytree.py
|
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
objStack = []
def createAttribute(s, loc, toks):
if toks[0] == '!':
newtoks = ""
for i in toks:
newtoks += i
return BinNode(newtoks)
return BinNode(toks[0]) # create
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
print("print: %s" % toks)
return BinNode(toks[0])
def printStuff(s, loc, toks):
print("print: %s" % toks)
return toks
def pushFirst( s, loc, toks ):
objStack.append( toks[0] )
def createTree(op, node1, node2):
if(op == "or"):
node = BinNode(OpType.OR)
elif(op == "and"):
node = BinNode(OpType.AND)
else:
return None
node.addSubNode(node1, node2)
return node
class PolicyParser:
def __init__(self, verbose=False):
self.finalPol = self.getBNF()
self.verbose = verbose
def getBNF(self):
# supported operators => (OR, AND, <
OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
Operator = OperatorAND | OperatorOR
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
# describes an individual leaf node
leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
# describes expressions such as (attr < value)
leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
# describes the node concept
node = leafConditional | leafNode
expr = Forward()
term = Forward()
atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
finalPol = expr#.setParseAction( printStuff )
return finalPol
def evalStack(self, stack):
op = stack.pop()
if op in ["or", "and"]:
op2 = self.evalStack(stack)
op1 = self.evalStack(stack)
return createTree(op, op1, op2)
else:
# Node value (attribute)
return op
def parse(self, string):
global objStack
del objStack[:]
self.finalPol.parseString(string)
return self.evalStack(objStack)
def findDuplicates(self, tree, _dict):
if tree.left: self.findDuplicates(tree.left, _dict)
if tree.right: self.findDuplicates(tree.right, _dict)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dict.get(key) == None: _dict[ key ] = 1
else: _dict[ key ] += 1
def labelDuplicates(self, tree, _dictLabel):
if tree.left: self.labelDuplicates(tree.left, _dictLabel)
if tree.right: self.labelDuplicates(tree.right, _dictLabel)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dictLabel.get(key) != None:
tree.index = _dictLabel[ key ]
_dictLabel[ key ] += 1
def prune(self, tree, attributes):
"""given policy tree and attributes, determine whether the attributes satisfy the policy.
if not enough attributes to satisfy policy, return None otherwise, a pruned list of
attributes to potentially recover the associated secret.
"""
(policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
# print("pruned attrs: ", prunedList)
# if prunedList:
# for i in prunedList:
# print("node: ", i)
if not policySatisfied:
return policySatisfied
return prunedList
def requiredAttributes(self, tree, attrList):
""" determines the required attributes to satisfy policy tree and returns a list of BinNode
objects."""
if tree == None: return 0
Left = tree.getLeft()
Right = tree.getRight()
if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
if(tree.getNodeType() == OpType.OR):
# never return both attributes, basically the first one that matches from left to right
if resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft or resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
if(tree.getNodeType() == OpType.AND):
if resultLeft and resultRight: sendThis = leftAttr + rightAttr
elif resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft and resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
elif(tree.getNodeType() == OpType.ATTR):
if(tree.getAttribute() in attrList):
return (True, [tree])
else:
|
return
if __name__ == "__main__":
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
tree1 = parser.parse("(1 or 2) and (2 and 3))")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
|
return (False, None)
|
conditional_block
|
policytree.py
|
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
objStack = []
def createAttribute(s, loc, toks):
if toks[0] == '!':
newtoks = ""
for i in toks:
newtoks += i
return BinNode(newtoks)
return BinNode(toks[0]) # create
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
print("print: %s" % toks)
return BinNode(toks[0])
def printStuff(s, loc, toks):
print("print: %s" % toks)
return toks
def pushFirst( s, loc, toks ):
objStack.append( toks[0] )
def createTree(op, node1, node2):
if(op == "or"):
node = BinNode(OpType.OR)
elif(op == "and"):
node = BinNode(OpType.AND)
else:
return None
node.addSubNode(node1, node2)
return node
class PolicyParser:
def __init__(self, verbose=False):
self.finalPol = self.getBNF()
self.verbose = verbose
def getBNF(self):
# supported operators => (OR, AND, <
OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
Operator = OperatorAND | OperatorOR
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
# describes an individual leaf node
leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
# describes expressions such as (attr < value)
leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
# describes the node concept
node = leafConditional | leafNode
expr = Forward()
term = Forward()
atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
finalPol = expr#.setParseAction( printStuff )
return finalPol
def evalStack(self, stack):
op = stack.pop()
if op in ["or", "and"]:
op2 = self.evalStack(stack)
op1 = self.evalStack(stack)
return createTree(op, op1, op2)
else:
# Node value (attribute)
return op
def parse(self, string):
global objStack
del objStack[:]
self.finalPol.parseString(string)
return self.evalStack(objStack)
def findDuplicates(self, tree, _dict):
if tree.left: self.findDuplicates(tree.left, _dict)
if tree.right: self.findDuplicates(tree.right, _dict)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dict.get(key) == None: _dict[ key ] = 1
else: _dict[ key ] += 1
def
|
(self, tree, _dictLabel):
if tree.left: self.labelDuplicates(tree.left, _dictLabel)
if tree.right: self.labelDuplicates(tree.right, _dictLabel)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dictLabel.get(key) != None:
tree.index = _dictLabel[ key ]
_dictLabel[ key ] += 1
def prune(self, tree, attributes):
"""given policy tree and attributes, determine whether the attributes satisfy the policy.
if not enough attributes to satisfy policy, return None otherwise, a pruned list of
attributes to potentially recover the associated secret.
"""
(policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
# print("pruned attrs: ", prunedList)
# if prunedList:
# for i in prunedList:
# print("node: ", i)
if not policySatisfied:
return policySatisfied
return prunedList
def requiredAttributes(self, tree, attrList):
""" determines the required attributes to satisfy policy tree and returns a list of BinNode
objects."""
if tree == None: return 0
Left = tree.getLeft()
Right = tree.getRight()
if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
if(tree.getNodeType() == OpType.OR):
# never return both attributes, basically the first one that matches from left to right
if resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft or resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
if(tree.getNodeType() == OpType.AND):
if resultLeft and resultRight: sendThis = leftAttr + rightAttr
elif resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft and resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
elif(tree.getNodeType() == OpType.ATTR):
if(tree.getAttribute() in attrList):
return (True, [tree])
else:
return (False, None)
return
if __name__ == "__main__":
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
tree1 = parser.parse("(1 or 2) and (2 and 3))")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
|
labelDuplicates
|
identifier_name
|
policytree.py
|
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
objStack = []
def createAttribute(s, loc, toks):
if toks[0] == '!':
newtoks = ""
for i in toks:
newtoks += i
return BinNode(newtoks)
return BinNode(toks[0]) # create
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
print("print: %s" % toks)
return BinNode(toks[0])
def printStuff(s, loc, toks):
print("print: %s" % toks)
return toks
def pushFirst( s, loc, toks ):
objStack.append( toks[0] )
def createTree(op, node1, node2):
if(op == "or"):
node = BinNode(OpType.OR)
elif(op == "and"):
node = BinNode(OpType.AND)
else:
return None
node.addSubNode(node1, node2)
return node
class PolicyParser:
def __init__(self, verbose=False):
self.finalPol = self.getBNF()
self.verbose = verbose
def getBNF(self):
# supported operators => (OR, AND, <
OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
Operator = OperatorAND | OperatorOR
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)
# describes an individual leaf node
leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
# describes expressions such as (attr < value)
leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
# describes the node concept
node = leafConditional | leafNode
expr = Forward()
term = Forward()
atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
finalPol = expr#.setParseAction( printStuff )
return finalPol
def evalStack(self, stack):
op = stack.pop()
if op in ["or", "and"]:
op2 = self.evalStack(stack)
op1 = self.evalStack(stack)
return createTree(op, op1, op2)
else:
# Node value (attribute)
return op
def parse(self, string):
global objStack
del objStack[:]
self.finalPol.parseString(string)
return self.evalStack(objStack)
def findDuplicates(self, tree, _dict):
if tree.left: self.findDuplicates(tree.left, _dict)
if tree.right: self.findDuplicates(tree.right, _dict)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dict.get(key) == None: _dict[ key ] = 1
else: _dict[ key ] += 1
def labelDuplicates(self, tree, _dictLabel):
if tree.left: self.labelDuplicates(tree.left, _dictLabel)
if tree.right: self.labelDuplicates(tree.right, _dictLabel)
if tree.getNodeType() == OpType.ATTR:
key = tree.getAttribute()
if _dictLabel.get(key) != None:
tree.index = _dictLabel[ key ]
_dictLabel[ key ] += 1
def prune(self, tree, attributes):
"""given policy tree and attributes, determine whether the attributes satisfy the policy.
if not enough attributes to satisfy policy, return None otherwise, a pruned list of
attributes to potentially recover the associated secret.
"""
(policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
# print("pruned attrs: ", prunedList)
# if prunedList:
# for i in prunedList:
# print("node: ", i)
if not policySatisfied:
return policySatisfied
return prunedList
def requiredAttributes(self, tree, attrList):
""" determines the required attributes to satisfy policy tree and returns a list of BinNode
objects."""
if tree == None: return 0
Left = tree.getLeft()
Right = tree.getRight()
if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
if(tree.getNodeType() == OpType.OR):
# never return both attributes, basically the first one that matches from left to right
if resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft or resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
if(tree.getNodeType() == OpType.AND):
if resultLeft and resultRight: sendThis = leftAttr + rightAttr
elif resultLeft: sendThis = leftAttr
elif resultRight: sendThis = rightAttr
else: sendThis = None
result = (resultLeft and resultRight)
if result == False: return (False, sendThis)
return (True, sendThis)
elif(tree.getNodeType() == OpType.ATTR):
if(tree.getAttribute() in attrList):
return (True, [tree])
else:
return (False, None)
return
|
# policy parser test cases
parser = PolicyParser()
attrs = ['1', '3']
print("Attrs in user set: ", attrs)
tree1 = parser.parse("(1 or 2) and (2 and 3))")
print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
tree2 = parser.parse("1 or (2 and 3)")
print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
tree3 = parser.parse("(1 or 2) and (4 or 3)")
print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
|
if __name__ == "__main__":
|
random_line_split
|
merge.js
|
import React from 'react'
import Icon from 'react-icon-base'
const IoMerge = props => (
<Icon viewBox="0 0 40 40" {...props}>
<g><path d="m30 17.5c2.7 0 5 2.3 5 5s-2.3 5-5 5c-1.9 0-3.4-1-4.3-2.5h-0.8c-4.7 0-9-2-12.4-5.8v9c1.5 0.9 2.5 2.4 2.5 4.3 0 2.7-2.3 5-5 5s-5-2.3-5-5c0-1.9 1-3.4 2.5-4.3v-16.4c-1.5-0.9-2.5-2.4-2.5-4.3 0-2.7 2.3-5 5-5s5 2.3 5 5c0 1.5-0.6 2.9-1.7 3.8 0.3 0.7 1.3 2.8 2.9 4.6 2.5 2.7 5.4 4.1 8.7 4.1h0.8c0.9-1.5 2.4-2.5 4.3-2.5z m-20-12.5c-1.4 0-2.5 1.1-2.5 2.5s1.1 2.5 2.5 2.5 2.5-1.1 2.5-2.5-1.1-2.5-2.5-2.5z m0 30c1.4 0 2.5-1.1 2.5-2.5s-1.1-2.5-2.5-2.5-2.5 1.1-2.5 2.5 1.1 2.5 2.5 2.5z m20-10c1.4 0 2.5-1.1 2.5-2.5s-1.1-2.5-2.5-2.5-2.5 1.1-2.5 2.5 1.1 2.5 2.5 2.5z"/></g>
|
export default IoMerge
|
</Icon>
)
|
random_line_split
|
RHPF.js
|
import assert from "assert";
import RHPF from "../../../src/scapi/units/RHPF";
import createNode from "../../../src/scapi/utils/createNode";
describe("scapi/units/RHPF", () => {
it(".ar should create audio rate node", () => {
const a = createNode("SinOsc", "audio", [ 440, 0 ]);
const node = RHPF.ar(a, 1000, 2);
assert.deepEqual(node, {
type: "RHPF", rate: "audio", props: [ a, 1000, 2 ]
});
});
it(".kr should create control rate node", () => {
const a = createNode("SinOsc", "control", [ 440, 0 ]);
const node = RHPF.kr(a, 1000, 2);
|
it("default rate is the same as the first input", () => {
const a = createNode("SinOsc", "audio", [ 440, 0 ]);
const node = RHPF(a);
assert.deepEqual(node, {
type: "RHPF", rate: "audio", props: [ a, 440, 1 ]
});
});
});
|
assert.deepEqual(node, {
type: "RHPF", rate: "control", props: [ a, 1000, 2 ]
});
});
|
random_line_split
|
chromosome_simulator.py
|
import argparse
import sys
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import random
import sys
import re
import os.path
import markov_gen
def parse_params(args):
parser = argparse.ArgumentParser(description = "Generate simulated chromosome")
# parser.add_argument('-c', '--cutoff', type = int, help = "Limit model to first c non-N bases")
parser.add_argument('-k', type = int, help = "Order of Markov chain", default = 5)
parser.add_argument('-s', '--seed', '-rng_seed', dest = 'seed', type = int, help = "RNG seed", default = None)
parser.add_argument('-n', '--negative_strand', action = "store_true", help = "Use repeats on negative string", default = False)
parser.add_argument('--family_file', help = "List of repeat families to use", default = None)
parser.add_argument('-m', '--mask', action = "store_true", help = "Turn masking on (all repeats printed as lower case).", default = False)
parser.add_argument('--mc', '--mc_file', dest = 'mc_file', help = "MC File (by default -- look in local directory; generates if not found).", default = None)
parser.add_argument('-S', '--suppress_pmck', action = "store_true", help = "Suppress the generation of a .pmc<k> file to store the markov chain for re-use")
parser.add_argument('--mi', '--max_interval', dest = "max_interval", type = int, help = "Maximum allowed length of interval between repeats; -1 value (default) means no maximum", default = -1)
parser.add_argument('--mi2', '--min_interval', dest = "min_interval", type = int, help = "Min allowed length of interval between repeats", default = 0)
parser.add_argument('--rn', '--retain_n', dest = "retain_n", action = 'store_true', help = "If used, will use the whole chromosome. Otherwise, cuts of Ns at either end.", default = False)
parser.add_argument('--nr', '--num_repeats', dest = 'num_repeats', type = int, help = "Specify the number of repeats. Simulation will terminate either 1000 bases or max interval bases past the nth instance of a repeat (excluding any other repeats in that range).", default = None)
parser.add_argument('-l', '--max_length', dest = 'max_length', type = int, help = "Maximum allowed length of simulated sequence.", default = None)
parser.add_argument('--lc', '--low_complexity', dest = 'low_complexity', action = 'store_true', help = "Keep low complexity and simple repeats (kept by default)", default = False)
parser.add_argument('--rb', '--rep_base', dest = 'rep_base', help = "Replace each TE with a ful copy of its ancestral seqeunce in the specified RepBase file", default = None)
parser.add_argument('-f', '--family_min', dest = "family_min", type = int, help = "Number of elements per family", default = 2)
parser.add_argument('--nf', '--num_family', dest = 'num_family', type = int, help = "Number of families", default = None)
#parser.add_argument('-o', '--output', help = "Output file (Default: replace chomosome file \".fa\" with \".sim.fa\")")
parser.add_argument("seq_file", help = "Sequence file (must be .fa)")
parser.add_argument("repeat_file", help = "RepeatMasker file (.fa.out)")
parser.add_argument("output", help = "Output file")
return parser.parse_args(args)
def nextRepeat(rpt_file, use_negative = True, S = {}, E = {}, I = {}):
"""Generator: each invokation returns the chromosome, start, finish, starand,
and family for the next repeat of the repeatmasker .fa.out files. S, if not empty,
is a filter for which repeats to use."""
fp = open(rpt_file)
fp.readline()
fp.readline()
for line in fp:
if line.rstrip():
A = re.split("\s+", line.strip())
chr, start, finish, strand, family, rpt_class, rpt_id = A[4], int(A[5])-1, int(A[6]), A[8], A[9], A[10], A[14]
if strand == '-' and not use_negative:
continue
if S and any([s in family for s in S]):
continue
if E and any([e in rpt_class for e in E]):
continue
if I and not family in I:
continue
if (strand == '+' or use_negative) and ((family in S) or not S) and not (rpt_class in E):
yield chr, start, finish, strand, family, rpt_class, int(rpt_id)
# fa_out_header: The fixed header lines for the .fa.out file
fa_out_header = "\tSW\tperc\tperc\tperc\tquery\tposition in query\tmatching\trepeat\tposition in repeat\n\tscore\tdiv.\tdel.\tins.\tsequence\tbegin\tend\t(left)\trepeat\tclass/family\tbegin\tend (left)\tID\n"
# fa_out_template: A template for creating lines for the .fa.out file.
fa_out_template = "\t0\t0\t0\t0\t{chr}\t{start}\t{finish}\t({left})\t{strand}\t{family}\t{rpt_class}\t0\t0\t(0)\t{rpt_id}\n"
def generate_chromosome(seq, markov_list, chr_start, chr_finish, rpt_gen, mask = False, max_interval = -1, min_interval = 0,num_repeats = None, max_length = None, limiting_chr = None, rep_base_hash = None):
"""
Generate a syntehtic sequence with real repeats:
* seq: A sequence (as a string).
* markov_list: List of the k+1 i-th order markov chains (from the markov_gen module).
* start/finish: Defined the coordinates of our actual template sequence. (We are ignoring anything that occurs before/faster.
* Allows us to cut of a prefix and/or suffix.
* rpt_gen: A generating function returning the repeat information (created by nextRepeat)
* mask: If true, all repeats will be lower-case. Otherwise, upper case.)
* max_interval: Maximum inter-repeat length.
* min_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* extend the length.
* max_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* cut the length.
"""
last_end = chr_start
if max_interval == -1:
max_interval = len(seq)
sim_seq = "" # Simulated sequence
fa_out = [] # Hold the new .fa.out file contents (by line)
rpt_count = 0 # Count of repeats (so we can quit when we reach num_repeats, if applicable)
for chr, start, finish, strand, family, rpt_class, rpt_id in rpt_gen:
if limiting_chr and chr not in limiting_chr: # Skip if we are on the wrong chromsome
continue
if start >= chr_finish: # Quit if we have gone past the allowed range (repeats are assumed to be sorted by start)
break
if start < chr_start or finish > chr_finish: # Skip if we are outside the allowed range
continue
if start < last_end: # Skip if this repeat overlapped the last one
continue
rpt_count += 1
# Add the next inter-TE sequence
inter_seq_len = max(min_interval, min(start - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
# Add the next sequence
if rep_base_hash:
rpt_seq = rep_base_hash[family]
else:
rpt_seq = seq[start:finish]
fa_out.append([chr, len(sim_seq)+1, len(sim_seq) + len(rpt_seq), strand, family, rpt_class, rpt_id]) # Coords adjusted for biologist notation
sim_seq += rpt_seq.lower() if mask else rpt_seq.upper()
if rpt_count == num_repeats:
break
last_end = max(last_end, finish)
# Add final sequence on
final_seq_len = max(min_interval, min(chr_finish - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
sim_seq_len = len(sim_seq)
fa_out_str = fa_out_header
for chr, start, finish, strand, family, rpt_class, rpt_id in fa_out:
fa_out_str += fa_out_template.format(chr=chr, start=start, finish=finish, left = sim_seq_len - finish, strand=strand, family=family, rpt_class=rpt_class, rpt_id=rpt_id)
return sim_seq, fa_out_str
bases = set("ACGTacgt")
def
|
(seq_file, k, suppress_save = False, mc_file = None, retain_n = False):
"""Load the sequence and the Markov Chain List.
Load the MC list from a file if it exists. If not, create the chain
and save it to the file for the next use (skip the save if suppressed).
Parameters:
* seq_file: The sequence file.
* k: The order of the markov chain.
* suppress_save: Boolean. If true, don't save the generated MC file. (Can't imagine why we would want this.)
* mc_file: The name of the mc_file to use. (Derive from seq_file if not provided.)
* retrain_n: If false, we will be cutting of the largest possible N* prefix and suffix.
Return: A tuple:
1. The chromosome sequence.
2. The markov chain
3. Where we will start in the template sequence (in case a prefix has been removed).
4. Where we will end in the templace sequence (in case a suffix has been removed).
"""
template_seq = str(SeqIO.read(seq_file, 'fasta').seq)
# Cut out all the maximul prefix and suffix of ambiguity codes -- which will have no effect on the Markov chain construction.
start, finish = 0, len(template_seq)
if not retain_n: # Cut down the chromsome to the first real base at each end -- eliminate trailing Ns.
while template_seq[start] not in bases: start += 1
while template_seq[finish-1] not in bases: finish -= 1
mc_file = re.sub("\.(fa|fasta)$", ".pmc%d" % (k), seq_file) if mc_file is None else mc_file
if os.path.exists(mc_file):
markov_list = markov_gen.read_pmck(mc_file)
else:
markov_list = markov_gen.MarkovArray(k, template_seq)
if not suppress_save:
markov_gen.pickle_markov_list(markov_list, mc_file)
return template_seq, markov_list, start, finish
def readRepBase(file):
return {R.id:"".join([x for x in str(R.seq) if x.upper() in {'A', 'C', 'G', 'T'}]) for R in SeqIO.parse(file, 'fasta')}
low_complexity = {'Low_complexity', 'Simple', 'Satellite'}
def select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash):
"""Used to select those families that have at least f members on the template chromosome.
Parameters:
* repeat_file: the .fa.out file
* f minimum number of allowed instances in a family.
* num_fams: Number of families to be choosen
* use_3prime: if false, ignore instances on the 3' strand
* filter_set: families that should be ignored
* toss_low: if true, ignore low-complexity families
* rep_base_hash: a hash table mapping family names to their rep_base sequences
Returns:
* List of families chosen
"""
C = {} # Family name -> count
for T in nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}):
if rep_base_hash and not T[4] in rep_base_hash:
continue
if T[4] in C:
C[T[4]] += 1
else:
C[T[4]] = 1
L = [k for k in C if C[k] >= f]
if num_fams == None:
return L
if num_fams > len(L):
sys.stderr.write("Not enough families for f\n")
exit(1);
return L[:num_fams]
def create_chromosome_file(seq_file, repeat_file, output_file, k = 5, use_3prime = True, filter_file = "rpt_list.txt", mask = False, seed = None, suppress = False, max_interval = -1, min_interval = 0, retain_n = False, num_repeats = None, max_length = None, toss_low = False, rep_base = None, f = 1, num_fams = None):
"""
Create a simualted chrosome with real repeat sequences from a chromsoe file.
Parameters:
* seq_file: fasta <seq>.fa, file containing the template sequence.
-- Assumed to exist a file <seq>.fa.out containing the repeatmasker annotations.
* k: Use a k-order markov chain. There must exists a markov chain file <seq>.pmc<k>.
* output_file: Fasta file to print sequence to.
* use_3prime: If false, only sequence on the 5' strand will be used. Default: True
* filter_file: A list of the repeats that should be used. If empty: all repeats. Default: "rpt_list.txt"
* mask: If true: copied repeats will be in lower case. Default: False
* seed: RNG seed
"""
if not output_file.endswith(".fa"):
output_file += ".fa"
random.seed(args.seed)
# First: load in the template sequence, markov chain, and the start/end coords of what we are using.
template_seq, markov_list, chr_start, chr_finish = loadSeqAndChain(args.seq_file, args.k, suppress, args.mc_file, args.retain_n)
# Read in the set of families to be ignored
filter_set = {y.strip() for line in open(filter_file) for y in re.split("\s+", line.rstrip())} if filter_file else {}
# Read in the RepBase sequence: maps name -> RepBaseSequence
rep_base_hash = readRepBase(rep_base) if rep_base else None # Hash of repeats ID -> sequences)
# Pick which families we are using.
selected = select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash)
# Create a sequence generator
rpt_gen = nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}, I = selected)
# Create the simulated sequence
simulated_sequence, fa_out = generate_chromosome(seq = template_seq, markov_list = markov_list, chr_start = chr_start, chr_finish = chr_finish, rpt_gen = rpt_gen, mask = mask, max_interval = max_interval, min_interval = min_interval, num_repeats = num_repeats, max_length = max_length, rep_base_hash = rep_base_hash)
# Write output to file
SeqIO.write([SeqRecord(seq = Seq(simulated_sequence), id = "seq_file", description = "Simulated sequence from %s using order %d markov chain" % (seq_file, len(markov_list)-1))], output_file, 'fasta')
open(output_file + ".out", "w").write(fa_out)
if __name__ == "__main__":
args = parse_params(sys.argv[1:])
create_chromosome_file(seq_file = args.seq_file, k = args.k, output_file = args.output,
repeat_file = args.repeat_file, use_3prime = args.negative_strand,
filter_file = args.family_file, mask = args.mask, seed = args.seed,
max_interval = args.max_interval, min_interval = args.min_interval, num_repeats = args.num_repeats,
max_length = args.max_length, toss_low = not args.low_complexity,
rep_base = args.rep_base, f = args.family_min, num_fams = args.num_family)
|
loadSeqAndChain
|
identifier_name
|
chromosome_simulator.py
|
import argparse
import sys
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import random
import sys
import re
import os.path
import markov_gen
def parse_params(args):
parser = argparse.ArgumentParser(description = "Generate simulated chromosome")
# parser.add_argument('-c', '--cutoff', type = int, help = "Limit model to first c non-N bases")
parser.add_argument('-k', type = int, help = "Order of Markov chain", default = 5)
parser.add_argument('-s', '--seed', '-rng_seed', dest = 'seed', type = int, help = "RNG seed", default = None)
parser.add_argument('-n', '--negative_strand', action = "store_true", help = "Use repeats on negative string", default = False)
parser.add_argument('--family_file', help = "List of repeat families to use", default = None)
parser.add_argument('-m', '--mask', action = "store_true", help = "Turn masking on (all repeats printed as lower case).", default = False)
parser.add_argument('--mc', '--mc_file', dest = 'mc_file', help = "MC File (by default -- look in local directory; generates if not found).", default = None)
parser.add_argument('-S', '--suppress_pmck', action = "store_true", help = "Suppress the generation of a .pmc<k> file to store the markov chain for re-use")
parser.add_argument('--mi', '--max_interval', dest = "max_interval", type = int, help = "Maximum allowed length of interval between repeats; -1 value (default) means no maximum", default = -1)
parser.add_argument('--mi2', '--min_interval', dest = "min_interval", type = int, help = "Min allowed length of interval between repeats", default = 0)
parser.add_argument('--rn', '--retain_n', dest = "retain_n", action = 'store_true', help = "If used, will use the whole chromosome. Otherwise, cuts of Ns at either end.", default = False)
parser.add_argument('--nr', '--num_repeats', dest = 'num_repeats', type = int, help = "Specify the number of repeats. Simulation will terminate either 1000 bases or max interval bases past the nth instance of a repeat (excluding any other repeats in that range).", default = None)
parser.add_argument('-l', '--max_length', dest = 'max_length', type = int, help = "Maximum allowed length of simulated sequence.", default = None)
parser.add_argument('--lc', '--low_complexity', dest = 'low_complexity', action = 'store_true', help = "Keep low complexity and simple repeats (kept by default)", default = False)
parser.add_argument('--rb', '--rep_base', dest = 'rep_base', help = "Replace each TE with a ful copy of its ancestral seqeunce in the specified RepBase file", default = None)
parser.add_argument('-f', '--family_min', dest = "family_min", type = int, help = "Number of elements per family", default = 2)
parser.add_argument('--nf', '--num_family', dest = 'num_family', type = int, help = "Number of families", default = None)
#parser.add_argument('-o', '--output', help = "Output file (Default: replace chomosome file \".fa\" with \".sim.fa\")")
parser.add_argument("seq_file", help = "Sequence file (must be .fa)")
parser.add_argument("repeat_file", help = "RepeatMasker file (.fa.out)")
parser.add_argument("output", help = "Output file")
return parser.parse_args(args)
def nextRepeat(rpt_file, use_negative = True, S = {}, E = {}, I = {}):
|
# fa_out_header: The fixed header lines for the .fa.out file
fa_out_header = "\tSW\tperc\tperc\tperc\tquery\tposition in query\tmatching\trepeat\tposition in repeat\n\tscore\tdiv.\tdel.\tins.\tsequence\tbegin\tend\t(left)\trepeat\tclass/family\tbegin\tend (left)\tID\n"
# fa_out_template: A template for creating lines for the .fa.out file.
fa_out_template = "\t0\t0\t0\t0\t{chr}\t{start}\t{finish}\t({left})\t{strand}\t{family}\t{rpt_class}\t0\t0\t(0)\t{rpt_id}\n"
def generate_chromosome(seq, markov_list, chr_start, chr_finish, rpt_gen, mask = False, max_interval = -1, min_interval = 0,num_repeats = None, max_length = None, limiting_chr = None, rep_base_hash = None):
"""
Generate a syntehtic sequence with real repeats:
* seq: A sequence (as a string).
* markov_list: List of the k+1 i-th order markov chains (from the markov_gen module).
* start/finish: Defined the coordinates of our actual template sequence. (We are ignoring anything that occurs before/faster.
* Allows us to cut of a prefix and/or suffix.
* rpt_gen: A generating function returning the repeat information (created by nextRepeat)
* mask: If true, all repeats will be lower-case. Otherwise, upper case.)
* max_interval: Maximum inter-repeat length.
* min_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* extend the length.
* max_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* cut the length.
"""
last_end = chr_start
if max_interval == -1:
max_interval = len(seq)
sim_seq = "" # Simulated sequence
fa_out = [] # Hold the new .fa.out file contents (by line)
rpt_count = 0 # Count of repeats (so we can quit when we reach num_repeats, if applicable)
for chr, start, finish, strand, family, rpt_class, rpt_id in rpt_gen:
if limiting_chr and chr not in limiting_chr: # Skip if we are on the wrong chromsome
continue
if start >= chr_finish: # Quit if we have gone past the allowed range (repeats are assumed to be sorted by start)
break
if start < chr_start or finish > chr_finish: # Skip if we are outside the allowed range
continue
if start < last_end: # Skip if this repeat overlapped the last one
continue
rpt_count += 1
# Add the next inter-TE sequence
inter_seq_len = max(min_interval, min(start - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
# Add the next sequence
if rep_base_hash:
rpt_seq = rep_base_hash[family]
else:
rpt_seq = seq[start:finish]
fa_out.append([chr, len(sim_seq)+1, len(sim_seq) + len(rpt_seq), strand, family, rpt_class, rpt_id]) # Coords adjusted for biologist notation
sim_seq += rpt_seq.lower() if mask else rpt_seq.upper()
if rpt_count == num_repeats:
break
last_end = max(last_end, finish)
# Add final sequence on
final_seq_len = max(min_interval, min(chr_finish - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
sim_seq_len = len(sim_seq)
fa_out_str = fa_out_header
for chr, start, finish, strand, family, rpt_class, rpt_id in fa_out:
fa_out_str += fa_out_template.format(chr=chr, start=start, finish=finish, left = sim_seq_len - finish, strand=strand, family=family, rpt_class=rpt_class, rpt_id=rpt_id)
return sim_seq, fa_out_str
bases = set("ACGTacgt")
def loadSeqAndChain(seq_file, k, suppress_save = False, mc_file = None, retain_n = False):
"""Load the sequence and the Markov Chain List.
Load the MC list from a file if it exists. If not, create the chain
and save it to the file for the next use (skip the save if suppressed).
Parameters:
* seq_file: The sequence file.
* k: The order of the markov chain.
* suppress_save: Boolean. If true, don't save the generated MC file. (Can't imagine why we would want this.)
* mc_file: The name of the mc_file to use. (Derive from seq_file if not provided.)
* retrain_n: If false, we will be cutting of the largest possible N* prefix and suffix.
Return: A tuple:
1. The chromosome sequence.
2. The markov chain
3. Where we will start in the template sequence (in case a prefix has been removed).
4. Where we will end in the templace sequence (in case a suffix has been removed).
"""
template_seq = str(SeqIO.read(seq_file, 'fasta').seq)
# Cut out all the maximul prefix and suffix of ambiguity codes -- which will have no effect on the Markov chain construction.
start, finish = 0, len(template_seq)
if not retain_n: # Cut down the chromsome to the first real base at each end -- eliminate trailing Ns.
while template_seq[start] not in bases: start += 1
while template_seq[finish-1] not in bases: finish -= 1
mc_file = re.sub("\.(fa|fasta)$", ".pmc%d" % (k), seq_file) if mc_file is None else mc_file
if os.path.exists(mc_file):
markov_list = markov_gen.read_pmck(mc_file)
else:
markov_list = markov_gen.MarkovArray(k, template_seq)
if not suppress_save:
markov_gen.pickle_markov_list(markov_list, mc_file)
return template_seq, markov_list, start, finish
def readRepBase(file):
return {R.id:"".join([x for x in str(R.seq) if x.upper() in {'A', 'C', 'G', 'T'}]) for R in SeqIO.parse(file, 'fasta')}
low_complexity = {'Low_complexity', 'Simple', 'Satellite'}
def select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash):
"""Used to select those families that have at least f members on the template chromosome.
Parameters:
* repeat_file: the .fa.out file
* f minimum number of allowed instances in a family.
* num_fams: Number of families to be choosen
* use_3prime: if false, ignore instances on the 3' strand
* filter_set: families that should be ignored
* toss_low: if true, ignore low-complexity families
* rep_base_hash: a hash table mapping family names to their rep_base sequences
Returns:
* List of families chosen
"""
C = {} # Family name -> count
for T in nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}):
if rep_base_hash and not T[4] in rep_base_hash:
continue
if T[4] in C:
C[T[4]] += 1
else:
C[T[4]] = 1
L = [k for k in C if C[k] >= f]
if num_fams == None:
return L
if num_fams > len(L):
sys.stderr.write("Not enough families for f\n")
exit(1);
return L[:num_fams]
def create_chromosome_file(seq_file, repeat_file, output_file, k = 5, use_3prime = True, filter_file = "rpt_list.txt", mask = False, seed = None, suppress = False, max_interval = -1, min_interval = 0, retain_n = False, num_repeats = None, max_length = None, toss_low = False, rep_base = None, f = 1, num_fams = None):
"""
Create a simualted chrosome with real repeat sequences from a chromsoe file.
Parameters:
* seq_file: fasta <seq>.fa, file containing the template sequence.
-- Assumed to exist a file <seq>.fa.out containing the repeatmasker annotations.
* k: Use a k-order markov chain. There must exists a markov chain file <seq>.pmc<k>.
* output_file: Fasta file to print sequence to.
* use_3prime: If false, only sequence on the 5' strand will be used. Default: True
* filter_file: A list of the repeats that should be used. If empty: all repeats. Default: "rpt_list.txt"
* mask: If true: copied repeats will be in lower case. Default: False
* seed: RNG seed
"""
if not output_file.endswith(".fa"):
output_file += ".fa"
random.seed(args.seed)
# First: load in the template sequence, markov chain, and the start/end coords of what we are using.
template_seq, markov_list, chr_start, chr_finish = loadSeqAndChain(args.seq_file, args.k, suppress, args.mc_file, args.retain_n)
# Read in the set of families to be ignored
filter_set = {y.strip() for line in open(filter_file) for y in re.split("\s+", line.rstrip())} if filter_file else {}
# Read in the RepBase sequence: maps name -> RepBaseSequence
rep_base_hash = readRepBase(rep_base) if rep_base else None # Hash of repeats ID -> sequences)
# Pick which families we are using.
selected = select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash)
# Create a sequence generator
rpt_gen = nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}, I = selected)
# Create the simulated sequence
simulated_sequence, fa_out = generate_chromosome(seq = template_seq, markov_list = markov_list, chr_start = chr_start, chr_finish = chr_finish, rpt_gen = rpt_gen, mask = mask, max_interval = max_interval, min_interval = min_interval, num_repeats = num_repeats, max_length = max_length, rep_base_hash = rep_base_hash)
# Write output to file
SeqIO.write([SeqRecord(seq = Seq(simulated_sequence), id = "seq_file", description = "Simulated sequence from %s using order %d markov chain" % (seq_file, len(markov_list)-1))], output_file, 'fasta')
open(output_file + ".out", "w").write(fa_out)
if __name__ == "__main__":
args = parse_params(sys.argv[1:])
create_chromosome_file(seq_file = args.seq_file, k = args.k, output_file = args.output,
repeat_file = args.repeat_file, use_3prime = args.negative_strand,
filter_file = args.family_file, mask = args.mask, seed = args.seed,
max_interval = args.max_interval, min_interval = args.min_interval, num_repeats = args.num_repeats,
max_length = args.max_length, toss_low = not args.low_complexity,
rep_base = args.rep_base, f = args.family_min, num_fams = args.num_family)
|
"""Generator: each invokation returns the chromosome, start, finish, starand,
and family for the next repeat of the repeatmasker .fa.out files. S, if not empty,
is a filter for which repeats to use."""
fp = open(rpt_file)
fp.readline()
fp.readline()
for line in fp:
if line.rstrip():
A = re.split("\s+", line.strip())
chr, start, finish, strand, family, rpt_class, rpt_id = A[4], int(A[5])-1, int(A[6]), A[8], A[9], A[10], A[14]
if strand == '-' and not use_negative:
continue
if S and any([s in family for s in S]):
continue
if E and any([e in rpt_class for e in E]):
continue
if I and not family in I:
continue
if (strand == '+' or use_negative) and ((family in S) or not S) and not (rpt_class in E):
yield chr, start, finish, strand, family, rpt_class, int(rpt_id)
|
identifier_body
|
chromosome_simulator.py
|
import argparse
import sys
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import random
import sys
import re
import os.path
import markov_gen
def parse_params(args):
parser = argparse.ArgumentParser(description = "Generate simulated chromosome")
# parser.add_argument('-c', '--cutoff', type = int, help = "Limit model to first c non-N bases")
parser.add_argument('-k', type = int, help = "Order of Markov chain", default = 5)
parser.add_argument('-s', '--seed', '-rng_seed', dest = 'seed', type = int, help = "RNG seed", default = None)
parser.add_argument('-n', '--negative_strand', action = "store_true", help = "Use repeats on negative string", default = False)
parser.add_argument('--family_file', help = "List of repeat families to use", default = None)
parser.add_argument('-m', '--mask', action = "store_true", help = "Turn masking on (all repeats printed as lower case).", default = False)
parser.add_argument('--mc', '--mc_file', dest = 'mc_file', help = "MC File (by default -- look in local directory; generates if not found).", default = None)
parser.add_argument('-S', '--suppress_pmck', action = "store_true", help = "Suppress the generation of a .pmc<k> file to store the markov chain for re-use")
parser.add_argument('--mi', '--max_interval', dest = "max_interval", type = int, help = "Maximum allowed length of interval between repeats; -1 value (default) means no maximum", default = -1)
parser.add_argument('--mi2', '--min_interval', dest = "min_interval", type = int, help = "Min allowed length of interval between repeats", default = 0)
parser.add_argument('--rn', '--retain_n', dest = "retain_n", action = 'store_true', help = "If used, will use the whole chromosome. Otherwise, cuts of Ns at either end.", default = False)
parser.add_argument('--nr', '--num_repeats', dest = 'num_repeats', type = int, help = "Specify the number of repeats. Simulation will terminate either 1000 bases or max interval bases past the nth instance of a repeat (excluding any other repeats in that range).", default = None)
parser.add_argument('-l', '--max_length', dest = 'max_length', type = int, help = "Maximum allowed length of simulated sequence.", default = None)
parser.add_argument('--lc', '--low_complexity', dest = 'low_complexity', action = 'store_true', help = "Keep low complexity and simple repeats (kept by default)", default = False)
parser.add_argument('--rb', '--rep_base', dest = 'rep_base', help = "Replace each TE with a ful copy of its ancestral seqeunce in the specified RepBase file", default = None)
parser.add_argument('-f', '--family_min', dest = "family_min", type = int, help = "Number of elements per family", default = 2)
parser.add_argument('--nf', '--num_family', dest = 'num_family', type = int, help = "Number of families", default = None)
#parser.add_argument('-o', '--output', help = "Output file (Default: replace chomosome file \".fa\" with \".sim.fa\")")
parser.add_argument("seq_file", help = "Sequence file (must be .fa)")
parser.add_argument("repeat_file", help = "RepeatMasker file (.fa.out)")
parser.add_argument("output", help = "Output file")
return parser.parse_args(args)
def nextRepeat(rpt_file, use_negative = True, S = {}, E = {}, I = {}):
"""Generator: each invokation returns the chromosome, start, finish, starand,
and family for the next repeat of the repeatmasker .fa.out files. S, if not empty,
is a filter for which repeats to use."""
fp = open(rpt_file)
fp.readline()
fp.readline()
for line in fp:
if line.rstrip():
A = re.split("\s+", line.strip())
chr, start, finish, strand, family, rpt_class, rpt_id = A[4], int(A[5])-1, int(A[6]), A[8], A[9], A[10], A[14]
if strand == '-' and not use_negative:
continue
if S and any([s in family for s in S]):
continue
if E and any([e in rpt_class for e in E]):
continue
if I and not family in I:
continue
if (strand == '+' or use_negative) and ((family in S) or not S) and not (rpt_class in E):
yield chr, start, finish, strand, family, rpt_class, int(rpt_id)
# fa_out_header: The fixed header lines for the .fa.out file
fa_out_header = "\tSW\tperc\tperc\tperc\tquery\tposition in query\tmatching\trepeat\tposition in repeat\n\tscore\tdiv.\tdel.\tins.\tsequence\tbegin\tend\t(left)\trepeat\tclass/family\tbegin\tend (left)\tID\n"
# fa_out_template: A template for creating lines for the .fa.out file.
fa_out_template = "\t0\t0\t0\t0\t{chr}\t{start}\t{finish}\t({left})\t{strand}\t{family}\t{rpt_class}\t0\t0\t(0)\t{rpt_id}\n"
def generate_chromosome(seq, markov_list, chr_start, chr_finish, rpt_gen, mask = False, max_interval = -1, min_interval = 0,num_repeats = None, max_length = None, limiting_chr = None, rep_base_hash = None):
"""
Generate a syntehtic sequence with real repeats:
* seq: A sequence (as a string).
* markov_list: List of the k+1 i-th order markov chains (from the markov_gen module).
* start/finish: Defined the coordinates of our actual template sequence. (We are ignoring anything that occurs before/faster.
* Allows us to cut of a prefix and/or suffix.
* rpt_gen: A generating function returning the repeat information (created by nextRepeat)
* mask: If true, all repeats will be lower-case. Otherwise, upper case.)
* max_interval: Maximum inter-repeat length.
* min_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* extend the length.
* max_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* cut the length.
"""
last_end = chr_start
if max_interval == -1:
|
sim_seq = "" # Simulated sequence
fa_out = [] # Hold the new .fa.out file contents (by line)
rpt_count = 0 # Count of repeats (so we can quit when we reach num_repeats, if applicable)
for chr, start, finish, strand, family, rpt_class, rpt_id in rpt_gen:
if limiting_chr and chr not in limiting_chr: # Skip if we are on the wrong chromsome
continue
if start >= chr_finish: # Quit if we have gone past the allowed range (repeats are assumed to be sorted by start)
break
if start < chr_start or finish > chr_finish: # Skip if we are outside the allowed range
continue
if start < last_end: # Skip if this repeat overlapped the last one
continue
rpt_count += 1
# Add the next inter-TE sequence
inter_seq_len = max(min_interval, min(start - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
# Add the next sequence
if rep_base_hash:
rpt_seq = rep_base_hash[family]
else:
rpt_seq = seq[start:finish]
fa_out.append([chr, len(sim_seq)+1, len(sim_seq) + len(rpt_seq), strand, family, rpt_class, rpt_id]) # Coords adjusted for biologist notation
sim_seq += rpt_seq.lower() if mask else rpt_seq.upper()
if rpt_count == num_repeats:
break
last_end = max(last_end, finish)
# Add final sequence on
final_seq_len = max(min_interval, min(chr_finish - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
sim_seq_len = len(sim_seq)
fa_out_str = fa_out_header
for chr, start, finish, strand, family, rpt_class, rpt_id in fa_out:
fa_out_str += fa_out_template.format(chr=chr, start=start, finish=finish, left = sim_seq_len - finish, strand=strand, family=family, rpt_class=rpt_class, rpt_id=rpt_id)
return sim_seq, fa_out_str
bases = set("ACGTacgt")
def loadSeqAndChain(seq_file, k, suppress_save = False, mc_file = None, retain_n = False):
"""Load the sequence and the Markov Chain List.
Load the MC list from a file if it exists. If not, create the chain
and save it to the file for the next use (skip the save if suppressed).
Parameters:
* seq_file: The sequence file.
* k: The order of the markov chain.
* suppress_save: Boolean. If true, don't save the generated MC file. (Can't imagine why we would want this.)
* mc_file: The name of the mc_file to use. (Derive from seq_file if not provided.)
* retrain_n: If false, we will be cutting of the largest possible N* prefix and suffix.
Return: A tuple:
1. The chromosome sequence.
2. The markov chain
3. Where we will start in the template sequence (in case a prefix has been removed).
4. Where we will end in the templace sequence (in case a suffix has been removed).
"""
template_seq = str(SeqIO.read(seq_file, 'fasta').seq)
# Cut out all the maximul prefix and suffix of ambiguity codes -- which will have no effect on the Markov chain construction.
start, finish = 0, len(template_seq)
if not retain_n: # Cut down the chromsome to the first real base at each end -- eliminate trailing Ns.
while template_seq[start] not in bases: start += 1
while template_seq[finish-1] not in bases: finish -= 1
mc_file = re.sub("\.(fa|fasta)$", ".pmc%d" % (k), seq_file) if mc_file is None else mc_file
if os.path.exists(mc_file):
markov_list = markov_gen.read_pmck(mc_file)
else:
markov_list = markov_gen.MarkovArray(k, template_seq)
if not suppress_save:
markov_gen.pickle_markov_list(markov_list, mc_file)
return template_seq, markov_list, start, finish
def readRepBase(file):
return {R.id:"".join([x for x in str(R.seq) if x.upper() in {'A', 'C', 'G', 'T'}]) for R in SeqIO.parse(file, 'fasta')}
low_complexity = {'Low_complexity', 'Simple', 'Satellite'}
def select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash):
"""Used to select those families that have at least f members on the template chromosome.
Parameters:
* repeat_file: the .fa.out file
* f minimum number of allowed instances in a family.
* num_fams: Number of families to be choosen
* use_3prime: if false, ignore instances on the 3' strand
* filter_set: families that should be ignored
* toss_low: if true, ignore low-complexity families
* rep_base_hash: a hash table mapping family names to their rep_base sequences
Returns:
* List of families chosen
"""
C = {} # Family name -> count
for T in nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}):
if rep_base_hash and not T[4] in rep_base_hash:
continue
if T[4] in C:
C[T[4]] += 1
else:
C[T[4]] = 1
L = [k for k in C if C[k] >= f]
if num_fams == None:
return L
if num_fams > len(L):
sys.stderr.write("Not enough families for f\n")
exit(1);
return L[:num_fams]
def create_chromosome_file(seq_file, repeat_file, output_file, k = 5, use_3prime = True, filter_file = "rpt_list.txt", mask = False, seed = None, suppress = False, max_interval = -1, min_interval = 0, retain_n = False, num_repeats = None, max_length = None, toss_low = False, rep_base = None, f = 1, num_fams = None):
"""
Create a simualted chrosome with real repeat sequences from a chromsoe file.
Parameters:
* seq_file: fasta <seq>.fa, file containing the template sequence.
-- Assumed to exist a file <seq>.fa.out containing the repeatmasker annotations.
* k: Use a k-order markov chain. There must exists a markov chain file <seq>.pmc<k>.
* output_file: Fasta file to print sequence to.
* use_3prime: If false, only sequence on the 5' strand will be used. Default: True
* filter_file: A list of the repeats that should be used. If empty: all repeats. Default: "rpt_list.txt"
* mask: If true: copied repeats will be in lower case. Default: False
* seed: RNG seed
"""
if not output_file.endswith(".fa"):
output_file += ".fa"
random.seed(args.seed)
# First: load in the template sequence, markov chain, and the start/end coords of what we are using.
template_seq, markov_list, chr_start, chr_finish = loadSeqAndChain(args.seq_file, args.k, suppress, args.mc_file, args.retain_n)
# Read in the set of families to be ignored
filter_set = {y.strip() for line in open(filter_file) for y in re.split("\s+", line.rstrip())} if filter_file else {}
# Read in the RepBase sequence: maps name -> RepBaseSequence
rep_base_hash = readRepBase(rep_base) if rep_base else None # Hash of repeats ID -> sequences)
# Pick which families we are using.
selected = select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash)
# Create a sequence generator
rpt_gen = nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}, I = selected)
# Create the simulated sequence
simulated_sequence, fa_out = generate_chromosome(seq = template_seq, markov_list = markov_list, chr_start = chr_start, chr_finish = chr_finish, rpt_gen = rpt_gen, mask = mask, max_interval = max_interval, min_interval = min_interval, num_repeats = num_repeats, max_length = max_length, rep_base_hash = rep_base_hash)
# Write output to file
SeqIO.write([SeqRecord(seq = Seq(simulated_sequence), id = "seq_file", description = "Simulated sequence from %s using order %d markov chain" % (seq_file, len(markov_list)-1))], output_file, 'fasta')
open(output_file + ".out", "w").write(fa_out)
if __name__ == "__main__":
args = parse_params(sys.argv[1:])
create_chromosome_file(seq_file = args.seq_file, k = args.k, output_file = args.output,
repeat_file = args.repeat_file, use_3prime = args.negative_strand,
filter_file = args.family_file, mask = args.mask, seed = args.seed,
max_interval = args.max_interval, min_interval = args.min_interval, num_repeats = args.num_repeats,
max_length = args.max_length, toss_low = not args.low_complexity,
rep_base = args.rep_base, f = args.family_min, num_fams = args.num_family)
|
max_interval = len(seq)
|
conditional_block
|
chromosome_simulator.py
|
import argparse
import sys
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import random
import sys
import re
import os.path
import markov_gen
def parse_params(args):
parser = argparse.ArgumentParser(description = "Generate simulated chromosome")
# parser.add_argument('-c', '--cutoff', type = int, help = "Limit model to first c non-N bases")
parser.add_argument('-k', type = int, help = "Order of Markov chain", default = 5)
parser.add_argument('-s', '--seed', '-rng_seed', dest = 'seed', type = int, help = "RNG seed", default = None)
parser.add_argument('-n', '--negative_strand', action = "store_true", help = "Use repeats on negative string", default = False)
parser.add_argument('--family_file', help = "List of repeat families to use", default = None)
parser.add_argument('-m', '--mask', action = "store_true", help = "Turn masking on (all repeats printed as lower case).", default = False)
parser.add_argument('--mc', '--mc_file', dest = 'mc_file', help = "MC File (by default -- look in local directory; generates if not found).", default = None)
parser.add_argument('-S', '--suppress_pmck', action = "store_true", help = "Suppress the generation of a .pmc<k> file to store the markov chain for re-use")
parser.add_argument('--mi', '--max_interval', dest = "max_interval", type = int, help = "Maximum allowed length of interval between repeats; -1 value (default) means no maximum", default = -1)
parser.add_argument('--mi2', '--min_interval', dest = "min_interval", type = int, help = "Min allowed length of interval between repeats", default = 0)
parser.add_argument('--rn', '--retain_n', dest = "retain_n", action = 'store_true', help = "If used, will use the whole chromosome. Otherwise, cuts of Ns at either end.", default = False)
parser.add_argument('--nr', '--num_repeats', dest = 'num_repeats', type = int, help = "Specify the number of repeats. Simulation will terminate either 1000 bases or max interval bases past the nth instance of a repeat (excluding any other repeats in that range).", default = None)
parser.add_argument('-l', '--max_length', dest = 'max_length', type = int, help = "Maximum allowed length of simulated sequence.", default = None)
parser.add_argument('--lc', '--low_complexity', dest = 'low_complexity', action = 'store_true', help = "Keep low complexity and simple repeats (kept by default)", default = False)
parser.add_argument('--rb', '--rep_base', dest = 'rep_base', help = "Replace each TE with a ful copy of its ancestral seqeunce in the specified RepBase file", default = None)
parser.add_argument('-f', '--family_min', dest = "family_min", type = int, help = "Number of elements per family", default = 2)
parser.add_argument('--nf', '--num_family', dest = 'num_family', type = int, help = "Number of families", default = None)
#parser.add_argument('-o', '--output', help = "Output file (Default: replace chomosome file \".fa\" with \".sim.fa\")")
parser.add_argument("seq_file", help = "Sequence file (must be .fa)")
parser.add_argument("repeat_file", help = "RepeatMasker file (.fa.out)")
parser.add_argument("output", help = "Output file")
return parser.parse_args(args)
def nextRepeat(rpt_file, use_negative = True, S = {}, E = {}, I = {}):
"""Generator: each invokation returns the chromosome, start, finish, starand,
and family for the next repeat of the repeatmasker .fa.out files. S, if not empty,
is a filter for which repeats to use."""
fp = open(rpt_file)
fp.readline()
fp.readline()
for line in fp:
if line.rstrip():
A = re.split("\s+", line.strip())
chr, start, finish, strand, family, rpt_class, rpt_id = A[4], int(A[5])-1, int(A[6]), A[8], A[9], A[10], A[14]
if strand == '-' and not use_negative:
continue
if S and any([s in family for s in S]):
continue
if E and any([e in rpt_class for e in E]):
continue
if I and not family in I:
continue
if (strand == '+' or use_negative) and ((family in S) or not S) and not (rpt_class in E):
yield chr, start, finish, strand, family, rpt_class, int(rpt_id)
# fa_out_header: The fixed header lines for the .fa.out file
fa_out_header = "\tSW\tperc\tperc\tperc\tquery\tposition in query\tmatching\trepeat\tposition in repeat\n\tscore\tdiv.\tdel.\tins.\tsequence\tbegin\tend\t(left)\trepeat\tclass/family\tbegin\tend (left)\tID\n"
# fa_out_template: A template for creating lines for the .fa.out file.
fa_out_template = "\t0\t0\t0\t0\t{chr}\t{start}\t{finish}\t({left})\t{strand}\t{family}\t{rpt_class}\t0\t0\t(0)\t{rpt_id}\n"
def generate_chromosome(seq, markov_list, chr_start, chr_finish, rpt_gen, mask = False, max_interval = -1, min_interval = 0,num_repeats = None, max_length = None, limiting_chr = None, rep_base_hash = None):
"""
Generate a syntehtic sequence with real repeats:
* seq: A sequence (as a string).
* markov_list: List of the k+1 i-th order markov chains (from the markov_gen module).
* start/finish: Defined the coordinates of our actual template sequence. (We are ignoring anything that occurs before/faster.
* Allows us to cut of a prefix and/or suffix.
* rpt_gen: A generating function returning the repeat information (created by nextRepeat)
* mask: If true, all repeats will be lower-case. Otherwise, upper case.)
* max_interval: Maximum inter-repeat length.
* min_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* extend the length.
* max_interval: Minimum allowed length of a sequence between repeats. If two repeats are closer than this,
* cut the length.
"""
last_end = chr_start
if max_interval == -1:
max_interval = len(seq)
sim_seq = "" # Simulated sequence
fa_out = [] # Hold the new .fa.out file contents (by line)
rpt_count = 0 # Count of repeats (so we can quit when we reach num_repeats, if applicable)
for chr, start, finish, strand, family, rpt_class, rpt_id in rpt_gen:
if limiting_chr and chr not in limiting_chr: # Skip if we are on the wrong chromsome
continue
if start >= chr_finish: # Quit if we have gone past the allowed range (repeats are assumed to be sorted by start)
break
if start < chr_start or finish > chr_finish: # Skip if we are outside the allowed range
continue
if start < last_end: # Skip if this repeat overlapped the last one
continue
rpt_count += 1
# Add the next inter-TE sequence
inter_seq_len = max(min_interval, min(start - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
# Add the next sequence
if rep_base_hash:
rpt_seq = rep_base_hash[family]
else:
rpt_seq = seq[start:finish]
fa_out.append([chr, len(sim_seq)+1, len(sim_seq) + len(rpt_seq), strand, family, rpt_class, rpt_id]) # Coords adjusted for biologist notation
sim_seq += rpt_seq.lower() if mask else rpt_seq.upper()
if rpt_count == num_repeats:
break
last_end = max(last_end, finish)
# Add final sequence on
final_seq_len = max(min_interval, min(chr_finish - last_end, max_interval))
sim_seq += markov_gen.generate_sequence(markov_list, inter_seq_len)
sim_seq_len = len(sim_seq)
fa_out_str = fa_out_header
for chr, start, finish, strand, family, rpt_class, rpt_id in fa_out:
fa_out_str += fa_out_template.format(chr=chr, start=start, finish=finish, left = sim_seq_len - finish, strand=strand, family=family, rpt_class=rpt_class, rpt_id=rpt_id)
return sim_seq, fa_out_str
bases = set("ACGTacgt")
def loadSeqAndChain(seq_file, k, suppress_save = False, mc_file = None, retain_n = False):
"""Load the sequence and the Markov Chain List.
Load the MC list from a file if it exists. If not, create the chain
and save it to the file for the next use (skip the save if suppressed).
Parameters:
* seq_file: The sequence file.
* k: The order of the markov chain.
* suppress_save: Boolean. If true, don't save the generated MC file. (Can't imagine why we would want this.)
* mc_file: The name of the mc_file to use. (Derive from seq_file if not provided.)
* retrain_n: If false, we will be cutting of the largest possible N* prefix and suffix.
Return: A tuple:
1. The chromosome sequence.
2. The markov chain
3. Where we will start in the template sequence (in case a prefix has been removed).
4. Where we will end in the templace sequence (in case a suffix has been removed).
"""
template_seq = str(SeqIO.read(seq_file, 'fasta').seq)
# Cut out all the maximul prefix and suffix of ambiguity codes -- which will have no effect on the Markov chain construction.
start, finish = 0, len(template_seq)
if not retain_n: # Cut down the chromsome to the first real base at each end -- eliminate trailing Ns.
while template_seq[start] not in bases: start += 1
while template_seq[finish-1] not in bases: finish -= 1
mc_file = re.sub("\.(fa|fasta)$", ".pmc%d" % (k), seq_file) if mc_file is None else mc_file
if os.path.exists(mc_file):
markov_list = markov_gen.read_pmck(mc_file)
else:
markov_list = markov_gen.MarkovArray(k, template_seq)
if not suppress_save:
markov_gen.pickle_markov_list(markov_list, mc_file)
return template_seq, markov_list, start, finish
def readRepBase(file):
return {R.id:"".join([x for x in str(R.seq) if x.upper() in {'A', 'C', 'G', 'T'}]) for R in SeqIO.parse(file, 'fasta')}
low_complexity = {'Low_complexity', 'Simple', 'Satellite'}
def select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash):
"""Used to select those families that have at least f members on the template chromosome.
Parameters:
* repeat_file: the .fa.out file
* f minimum number of allowed instances in a family.
* num_fams: Number of families to be choosen
* use_3prime: if false, ignore instances on the 3' strand
* filter_set: families that should be ignored
* toss_low: if true, ignore low-complexity families
* rep_base_hash: a hash table mapping family names to their rep_base sequences
Returns:
* List of families chosen
"""
C = {} # Family name -> count
for T in nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}):
if rep_base_hash and not T[4] in rep_base_hash:
continue
if T[4] in C:
C[T[4]] += 1
else:
C[T[4]] = 1
L = [k for k in C if C[k] >= f]
if num_fams == None:
return L
if num_fams > len(L):
sys.stderr.write("Not enough families for f\n")
exit(1);
return L[:num_fams]
def create_chromosome_file(seq_file, repeat_file, output_file, k = 5, use_3prime = True, filter_file = "rpt_list.txt", mask = False, seed = None, suppress = False, max_interval = -1, min_interval = 0, retain_n = False, num_repeats = None, max_length = None, toss_low = False, rep_base = None, f = 1, num_fams = None):
"""
Create a simualted chrosome with real repeat sequences from a chromsoe file.
Parameters:
* seq_file: fasta <seq>.fa, file containing the template sequence.
-- Assumed to exist a file <seq>.fa.out containing the repeatmasker annotations.
* k: Use a k-order markov chain. There must exists a markov chain file <seq>.pmc<k>.
* output_file: Fasta file to print sequence to.
* use_3prime: If false, only sequence on the 5' strand will be used. Default: True
* filter_file: A list of the repeats that should be used. If empty: all repeats. Default: "rpt_list.txt"
* mask: If true: copied repeats will be in lower case. Default: False
|
random.seed(args.seed)
# First: load in the template sequence, markov chain, and the start/end coords of what we are using.
template_seq, markov_list, chr_start, chr_finish = loadSeqAndChain(args.seq_file, args.k, suppress, args.mc_file, args.retain_n)
# Read in the set of families to be ignored
filter_set = {y.strip() for line in open(filter_file) for y in re.split("\s+", line.rstrip())} if filter_file else {}
# Read in the RepBase sequence: maps name -> RepBaseSequence
rep_base_hash = readRepBase(rep_base) if rep_base else None # Hash of repeats ID -> sequences)
# Pick which families we are using.
selected = select_families(repeat_file, f, num_fams, use_3prime, filter_set, toss_low, rep_base_hash)
# Create a sequence generator
rpt_gen = nextRepeat(repeat_file, use_3prime, filter_set, E = low_complexity if toss_low else {}, I = selected)
# Create the simulated sequence
simulated_sequence, fa_out = generate_chromosome(seq = template_seq, markov_list = markov_list, chr_start = chr_start, chr_finish = chr_finish, rpt_gen = rpt_gen, mask = mask, max_interval = max_interval, min_interval = min_interval, num_repeats = num_repeats, max_length = max_length, rep_base_hash = rep_base_hash)
# Write output to file
SeqIO.write([SeqRecord(seq = Seq(simulated_sequence), id = "seq_file", description = "Simulated sequence from %s using order %d markov chain" % (seq_file, len(markov_list)-1))], output_file, 'fasta')
open(output_file + ".out", "w").write(fa_out)
if __name__ == "__main__":
args = parse_params(sys.argv[1:])
create_chromosome_file(seq_file = args.seq_file, k = args.k, output_file = args.output,
repeat_file = args.repeat_file, use_3prime = args.negative_strand,
filter_file = args.family_file, mask = args.mask, seed = args.seed,
max_interval = args.max_interval, min_interval = args.min_interval, num_repeats = args.num_repeats,
max_length = args.max_length, toss_low = not args.low_complexity,
rep_base = args.rep_base, f = args.family_min, num_fams = args.num_family)
|
* seed: RNG seed
"""
if not output_file.endswith(".fa"):
output_file += ".fa"
|
random_line_split
|
Alternative.ts
|
/*
* @Author: aaronpmishkin
* @Date: 2016-05-25 16:41:41
* @Last Modified by: aaronpmishkin
* @Last Modified time: 2016-09-22 20:35:14
*/
// Import Utility Classes:
import * as Formatter from '../modules/utilities/classes/Formatter';
/*
This class is the data representation of a decision option in a ValueChart. It uses an internal map object to associate
decision consequences with PrimitiveObjectives in the ValueChart. Each instance of the Alternative class must be a complete
mapping of a consequence to each PrimitiveObjective in the ValueChart to be valid, and each consequence must be within the
domain of the corresponding PrimitiveObjective. It is best to think about Alternatives as points in the consequence space
defined by the ValueChart's set of PrimitiveObjectives.
*/
export class Alternative {
// ========================================================================================
// Fields
// ========================================================================================
private name: string; // The name of the Alternative
private id: string; // The name of the Alternative formatted for use as a HTML id.
private description: string; // The description of the alternative.
private objectiveValues: Map<string, string | number>; // The internal Map objective used to match consequences to the name of the associated PrimitiveObjective.
// ========================================================================================
// Constructor
// ========================================================================================
/*
@param name - The name of the Alternative.
@param description - The description of the Alternative.
@returns {void}
@description Constructs a new Alternative with no consequences. Objective consequences for the new
Alternative must he added manually using the setObjectiveValue method.
*/
constructor(name: string, description: string) {
this.name = name;
this.description = description;
this.objectiveValues = new Map<string, string | number>();
this.id = Formatter.nameToID(this.name);
}
// ========================================================================================
// Methods
// ========================================================================================
getId(): string {
return this.id;
}
getName(): string {
return this.name;
}
setName(name: string): void
|
getDescription(): string {
return this.description;
}
setDescription(description: string): void {
this.description = description;
}
getObjectiveValue(objectiveName: string): string | number {
return this.objectiveValues.get(objectiveName);
}
/*
@returns {{ objectiveName: string, value: string | number }[]} - The collection of the Alternative's consequence paired with the associated objective's name.
@description Iterates over the objectiveValues to return an array of objective names paired with the Alternative's
consequence for that objective.
*/
getAllObjectiveValuePairs(): { objectiveName: string, value: string | number }[] {
var objectiveValuePairs: { objectiveName: string, value: string | number }[] = [];
var mapIterator: Iterator<string> = this.objectiveValues.keys();
var iteratorElement: IteratorResult<string> = mapIterator.next();
while (iteratorElement.done === false) {
objectiveValuePairs.push({ objectiveName: iteratorElement.value, value: this.objectiveValues.get(iteratorElement.value) });
iteratorElement = mapIterator.next();
}
return objectiveValuePairs;
}
setObjectiveValue(objectiveName: string, value: string | number): void {
this.objectiveValues.set(objectiveName, value);
}
removeObjective(objectiveName: string): void {
this.objectiveValues.delete(objectiveName);
}
}
|
{
this.name = name;
this.id = Formatter.nameToID(this.name);
}
|
identifier_body
|
Alternative.ts
|
/*
* @Author: aaronpmishkin
* @Date: 2016-05-25 16:41:41
* @Last Modified by: aaronpmishkin
* @Last Modified time: 2016-09-22 20:35:14
*/
// Import Utility Classes:
import * as Formatter from '../modules/utilities/classes/Formatter';
/*
This class is the data representation of a decision option in a ValueChart. It uses an internal map object to associate
decision consequences with PrimitiveObjectives in the ValueChart. Each instance of the Alternative class must be a complete
mapping of a consequence to each PrimitiveObjective in the ValueChart to be valid, and each consequence must be within the
domain of the corresponding PrimitiveObjective. It is best to think about Alternatives as points in the consequence space
defined by the ValueChart's set of PrimitiveObjectives.
*/
export class Alternative {
// ========================================================================================
// Fields
// ========================================================================================
private name: string; // The name of the Alternative
private id: string; // The name of the Alternative formatted for use as a HTML id.
private description: string; // The description of the alternative.
private objectiveValues: Map<string, string | number>; // The internal Map objective used to match consequences to the name of the associated PrimitiveObjective.
// ========================================================================================
// Constructor
// ========================================================================================
/*
@param name - The name of the Alternative.
@param description - The description of the Alternative.
@returns {void}
@description Constructs a new Alternative with no consequences. Objective consequences for the new
Alternative must he added manually using the setObjectiveValue method.
*/
constructor(name: string, description: string) {
this.name = name;
this.description = description;
this.objectiveValues = new Map<string, string | number>();
this.id = Formatter.nameToID(this.name);
}
// ========================================================================================
// Methods
// ========================================================================================
getId(): string {
return this.id;
}
|
(): string {
return this.name;
}
setName(name: string): void {
this.name = name;
this.id = Formatter.nameToID(this.name);
}
getDescription(): string {
return this.description;
}
setDescription(description: string): void {
this.description = description;
}
getObjectiveValue(objectiveName: string): string | number {
return this.objectiveValues.get(objectiveName);
}
/*
@returns {{ objectiveName: string, value: string | number }[]} - The collection of the Alternative's consequence paired with the associated objective's name.
@description Iterates over the objectiveValues to return an array of objective names paired with the Alternative's
consequence for that objective.
*/
getAllObjectiveValuePairs(): { objectiveName: string, value: string | number }[] {
var objectiveValuePairs: { objectiveName: string, value: string | number }[] = [];
var mapIterator: Iterator<string> = this.objectiveValues.keys();
var iteratorElement: IteratorResult<string> = mapIterator.next();
while (iteratorElement.done === false) {
objectiveValuePairs.push({ objectiveName: iteratorElement.value, value: this.objectiveValues.get(iteratorElement.value) });
iteratorElement = mapIterator.next();
}
return objectiveValuePairs;
}
setObjectiveValue(objectiveName: string, value: string | number): void {
this.objectiveValues.set(objectiveName, value);
}
removeObjective(objectiveName: string): void {
this.objectiveValues.delete(objectiveName);
}
}
|
getName
|
identifier_name
|
Alternative.ts
|
/*
* @Author: aaronpmishkin
* @Date: 2016-05-25 16:41:41
* @Last Modified by: aaronpmishkin
* @Last Modified time: 2016-09-22 20:35:14
*/
// Import Utility Classes:
import * as Formatter from '../modules/utilities/classes/Formatter';
/*
This class is the data representation of a decision option in a ValueChart. It uses an internal map object to associate
decision consequences with PrimitiveObjectives in the ValueChart. Each instance of the Alternative class must be a complete
mapping of a consequence to each PrimitiveObjective in the ValueChart to be valid, and each consequence must be within the
domain of the corresponding PrimitiveObjective. It is best to think about Alternatives as points in the consequence space
defined by the ValueChart's set of PrimitiveObjectives.
*/
export class Alternative {
// ========================================================================================
// Fields
// ========================================================================================
private name: string; // The name of the Alternative
private id: string; // The name of the Alternative formatted for use as a HTML id.
private description: string; // The description of the alternative.
private objectiveValues: Map<string, string | number>; // The internal Map objective used to match consequences to the name of the associated PrimitiveObjective.
// ========================================================================================
// Constructor
// ========================================================================================
/*
@param name - The name of the Alternative.
@param description - The description of the Alternative.
@returns {void}
@description Constructs a new Alternative with no consequences. Objective consequences for the new
Alternative must he added manually using the setObjectiveValue method.
*/
constructor(name: string, description: string) {
this.name = name;
this.description = description;
this.objectiveValues = new Map<string, string | number>();
this.id = Formatter.nameToID(this.name);
}
// ========================================================================================
// Methods
// ========================================================================================
getId(): string {
return this.id;
}
getName(): string {
return this.name;
}
setName(name: string): void {
this.name = name;
this.id = Formatter.nameToID(this.name);
}
getDescription(): string {
return this.description;
}
setDescription(description: string): void {
this.description = description;
}
getObjectiveValue(objectiveName: string): string | number {
return this.objectiveValues.get(objectiveName);
}
/*
@returns {{ objectiveName: string, value: string | number }[]} - The collection of the Alternative's consequence paired with the associated objective's name.
@description Iterates over the objectiveValues to return an array of objective names paired with the Alternative's
consequence for that objective.
*/
getAllObjectiveValuePairs(): { objectiveName: string, value: string | number }[] {
var objectiveValuePairs: { objectiveName: string, value: string | number }[] = [];
var mapIterator: Iterator<string> = this.objectiveValues.keys();
var iteratorElement: IteratorResult<string> = mapIterator.next();
while (iteratorElement.done === false) {
objectiveValuePairs.push({ objectiveName: iteratorElement.value, value: this.objectiveValues.get(iteratorElement.value) });
iteratorElement = mapIterator.next();
}
return objectiveValuePairs;
}
|
setObjectiveValue(objectiveName: string, value: string | number): void {
this.objectiveValues.set(objectiveName, value);
}
removeObjective(objectiveName: string): void {
this.objectiveValues.delete(objectiveName);
}
}
|
random_line_split
|
|
Alternative.ts
|
/*
* @Author: aaronpmishkin
* @Date: 2016-05-25 16:41:41
* @Last Modified by: aaronpmishkin
* @Last Modified time: 2016-09-22 20:35:14
*/
// Import Utility Classes:
import * as Formatter from '../modules/utilities/classes/Formatter';
/*
This class is the data representation of a decision option in a ValueChart. It uses an internal map object to associate
decision consequences with PrimitiveObjectives in the ValueChart. Each instance of the Alternative class must be a complete
mapping of a consequence to each PrimitiveObjective in the ValueChart to be valid, and each consequence must be within the
domain of the corresponding PrimitiveObjective. It is best to think about Alternatives as points in the consequence space
defined by the ValueChart's set of PrimitiveObjectives.
*/
export class Alternative {
// ========================================================================================
// Fields
// ========================================================================================
private name: string; // The name of the Alternative
private id: string; // The name of the Alternative formatted for use as a HTML id.
private description: string; // The description of the alternative.
private objectiveValues: Map<string, string | number>; // The internal Map objective used to match consequences to the name of the associated PrimitiveObjective.
// ========================================================================================
// Constructor
// ========================================================================================
/*
@param name - The name of the Alternative.
@param description - The description of the Alternative.
@returns {void}
@description Constructs a new Alternative with no consequences. Objective consequences for the new
Alternative must he added manually using the setObjectiveValue method.
*/
constructor(name: string, description: string) {
this.name = name;
this.description = description;
this.objectiveValues = new Map<string, string | number>();
this.id = Formatter.nameToID(this.name);
}
// ========================================================================================
// Methods
// ========================================================================================
getId(): string {
return this.id;
}
getName(): string {
return this.name;
}
setName(name: string): void {
this.name = name;
this.id = Formatter.nameToID(this.name);
}
getDescription(): string {
return this.description;
}
setDescription(description: string): void {
this.description = description;
}
getObjectiveValue(objectiveName: string): string | number {
return this.objectiveValues.get(objectiveName);
}
/*
@returns {{ objectiveName: string, value: string | number }[]} - The collection of the Alternative's consequence paired with the associated objective's name.
@description Iterates over the objectiveValues to return an array of objective names paired with the Alternative's
consequence for that objective.
*/
getAllObjectiveValuePairs(): { objectiveName: string, value: string | number }[] {
var objectiveValuePairs: { objectiveName: string, value: string | number }[] = [];
var mapIterator: Iterator<string> = this.objectiveValues.keys();
var iteratorElement: IteratorResult<string> = mapIterator.next();
while (iteratorElement.done === false)
|
return objectiveValuePairs;
}
setObjectiveValue(objectiveName: string, value: string | number): void {
this.objectiveValues.set(objectiveName, value);
}
removeObjective(objectiveName: string): void {
this.objectiveValues.delete(objectiveName);
}
}
|
{
objectiveValuePairs.push({ objectiveName: iteratorElement.value, value: this.objectiveValues.get(iteratorElement.value) });
iteratorElement = mapIterator.next();
}
|
conditional_block
|
schedule.service.ts
|
import {
Injectable,
OnDestroy,
Injector,
} from '@angular/core';
import { Store } from '@ngrx/store';
import { Observable, Subject, Subscription } from 'rxjs';
import { v1 as uuidV1 } from 'uuid';
import * as schedule from 'node-schedule';
import {
UtilService,
WebNotificationService,
MailNotificationService,
} from './';
import { tasksTypes } from '../index';
@Injectable()
export class ScheduleService implements OnDestroy {
public tasks$: Observable<Task[]>;
public scheduleEmitter = new Subject<TaskSchedule>();
public jobs: schedule.Job[] = [];
private scheduleServiceSub: Subscription;
constructor(
public store: Store<RXState>,
public injector: Injector,
public notificationService: WebNotificationService,
public mailNotificationService: MailNotificationService,
) {
// Combine the result of all the observables into the last method.
this.scheduleServiceSub = Observable.combineLatest(
// Get scheduleLists, filter them by active and remove the 'Show all' one.
this.store
.select<Folder[]>('folders')
.map(folders => folders.filter(folder => folder.id !== '' && folder.active))
.map(folders => folders.map(folder => folder.id)),
// Get taskSchedules, filter them by active and remove the 'Show all' one.
this.store
.select<TaskSchedule[]>('taskSchedules')
.map((taskSchedules) => taskSchedules.filter((taskSchedule) => taskSchedule.id !== '' && taskSchedule.active)),
// Filter all taskSchedules that are included in the actives scheduleLists
(folderIds, taskSchedules) => {
return taskSchedules.filter(taskSchedule => folderIds.includes(taskSchedule.folderId))
}
)
.subscribe((taskSchedules) => {
// Cancel all jobs, then schedule new ones with the filtered by combineLatest observable.
this.cancelJobs();
this.scheduleJobs(taskSchedules);
})
// Tasks observable
this.tasks$ = this.store.select<Task[]>('tasks')
// Execute taskSchedule using the 'executeTasks' method.
this.scheduleEmitter.subscribe((taskSchedule) => {
this.executeTasks(taskSchedule);
});
}
public ngOnDestroy() {
this.scheduleServiceSub && this.scheduleServiceSub.unsubscribe();
this.cancelJobs();
}
private cancelJobs() {
console.log('Cancelling jobs');
this.jobs.forEach((job) => job.cancel());
}
// Schedule taskSchedules passed as param
private scheduleJobs(taskSchedules: TaskSchedule[]) {
console.log('Scheduling jobs');
this.jobs = [
...taskSchedules.map((taskSchedule) => {
// Extract recurrence rule from taskSchedule using Util method 'templateStringSingleLine'.
const rule = UtilService.templateStringSingleLine(`
${taskSchedule.second || '*'}
${taskSchedule.minute || '*'}
${taskSchedule.hour || '*'}
${taskSchedule.dayOfMonth || '*'}
${taskSchedule.month || '*'}
${taskSchedule.dayOfWeek || '*'}
`);
const ruleObj = { rule } as schedule.RecurrenceSpecDateRange;
// Add date range if defined in the taskSchedule
if (taskSchedule.useDateRange) {
Object.assign(ruleObj, {
start: taskSchedule.start,
end: taskSchedule.end,
});
}
// Schedule job and emit event when it gets executed with callback
return schedule.scheduleJob(taskSchedule.name, ruleObj, () => {
this.scheduleEmitter.next(taskSchedule);
})
})
].filter(job => job);
}
/**
* Execute active tasks associated to taskSchedule
*
* @param {TaskSchedule} taskSchedule taskSchedule to be executed.
*
* @memberOf ScheduleService
*/
public async executeTasks(taskSchedule: TaskSchedule) {
console.log('Executing TaskSchedule: ', taskSchedule.name);
const tasks = await this.tasks$
.map((tasksArr) =>
tasksArr.filter(task =>
task.taskScheduleId === taskSchedule.id
))
.take(1)
.toPromise()
try {
const taskData = [];
for (const [taskIndex, task] of Array.from(tasks.entries())) {
const taskType = tasksTypes.find(taskTypeItem => taskTypeItem.type === task.type.type)
const taskExecutor = this.injector.get(taskType.executor)
await taskExecutor.executeTask(task, taskData, taskIndex)
}
const executedAt = new Date()
|
this.store.dispatch({
type: 'SET_TASK_SCHEDULE_EXECUTED_AT',
payload: {
id: taskSchedule.id,
executedAt,
} as TaskScheduleExecutedAt,
})
} catch (error) {
if (error === false) return;
console.error('Error happened executing taskSchedule: ', taskSchedule, 'Error: ', error);
this.notificationService.createErrorNotification({
title: 'Error executing task schedule',
body: `Task schedule: ${taskSchedule.name}\nError: ${JSON.stringify(error)}`,
tag: 'ScheduleService-taskExecutor-error',
})
if (!taskSchedule.mailNotify) return;
this.mailNotificationService.sendMail({
to: taskSchedule.mailAddress,
html: `
Task schedule: ${ JSON.stringify(taskSchedule, null, 2) }<br>
Error: ${error.message} ${JSON.stringify(error)}
`,
})
}
}
}
|
random_line_split
|
|
schedule.service.ts
|
import {
Injectable,
OnDestroy,
Injector,
} from '@angular/core';
import { Store } from '@ngrx/store';
import { Observable, Subject, Subscription } from 'rxjs';
import { v1 as uuidV1 } from 'uuid';
import * as schedule from 'node-schedule';
import {
UtilService,
WebNotificationService,
MailNotificationService,
} from './';
import { tasksTypes } from '../index';
@Injectable()
export class ScheduleService implements OnDestroy {
public tasks$: Observable<Task[]>;
public scheduleEmitter = new Subject<TaskSchedule>();
public jobs: schedule.Job[] = [];
private scheduleServiceSub: Subscription;
constructor(
public store: Store<RXState>,
public injector: Injector,
public notificationService: WebNotificationService,
public mailNotificationService: MailNotificationService,
) {
// Combine the result of all the observables into the last method.
this.scheduleServiceSub = Observable.combineLatest(
// Get scheduleLists, filter them by active and remove the 'Show all' one.
this.store
.select<Folder[]>('folders')
.map(folders => folders.filter(folder => folder.id !== '' && folder.active))
.map(folders => folders.map(folder => folder.id)),
// Get taskSchedules, filter them by active and remove the 'Show all' one.
this.store
.select<TaskSchedule[]>('taskSchedules')
.map((taskSchedules) => taskSchedules.filter((taskSchedule) => taskSchedule.id !== '' && taskSchedule.active)),
// Filter all taskSchedules that are included in the actives scheduleLists
(folderIds, taskSchedules) => {
return taskSchedules.filter(taskSchedule => folderIds.includes(taskSchedule.folderId))
}
)
.subscribe((taskSchedules) => {
// Cancel all jobs, then schedule new ones with the filtered by combineLatest observable.
this.cancelJobs();
this.scheduleJobs(taskSchedules);
})
// Tasks observable
this.tasks$ = this.store.select<Task[]>('tasks')
// Execute taskSchedule using the 'executeTasks' method.
this.scheduleEmitter.subscribe((taskSchedule) => {
this.executeTasks(taskSchedule);
});
}
public ngOnDestroy() {
this.scheduleServiceSub && this.scheduleServiceSub.unsubscribe();
this.cancelJobs();
}
private cancelJobs() {
console.log('Cancelling jobs');
this.jobs.forEach((job) => job.cancel());
}
// Schedule taskSchedules passed as param
private scheduleJobs(taskSchedules: TaskSchedule[]) {
console.log('Scheduling jobs');
this.jobs = [
...taskSchedules.map((taskSchedule) => {
// Extract recurrence rule from taskSchedule using Util method 'templateStringSingleLine'.
const rule = UtilService.templateStringSingleLine(`
${taskSchedule.second || '*'}
${taskSchedule.minute || '*'}
${taskSchedule.hour || '*'}
${taskSchedule.dayOfMonth || '*'}
${taskSchedule.month || '*'}
${taskSchedule.dayOfWeek || '*'}
`);
const ruleObj = { rule } as schedule.RecurrenceSpecDateRange;
// Add date range if defined in the taskSchedule
if (taskSchedule.useDateRange) {
Object.assign(ruleObj, {
start: taskSchedule.start,
end: taskSchedule.end,
});
}
// Schedule job and emit event when it gets executed with callback
return schedule.scheduleJob(taskSchedule.name, ruleObj, () => {
this.scheduleEmitter.next(taskSchedule);
})
})
].filter(job => job);
}
/**
* Execute active tasks associated to taskSchedule
*
* @param {TaskSchedule} taskSchedule taskSchedule to be executed.
*
* @memberOf ScheduleService
*/
public async executeTasks(taskSchedule: TaskSchedule)
|
}
|
{
console.log('Executing TaskSchedule: ', taskSchedule.name);
const tasks = await this.tasks$
.map((tasksArr) =>
tasksArr.filter(task =>
task.taskScheduleId === taskSchedule.id
))
.take(1)
.toPromise()
try {
const taskData = [];
for (const [taskIndex, task] of Array.from(tasks.entries())) {
const taskType = tasksTypes.find(taskTypeItem => taskTypeItem.type === task.type.type)
const taskExecutor = this.injector.get(taskType.executor)
await taskExecutor.executeTask(task, taskData, taskIndex)
}
const executedAt = new Date()
this.store.dispatch({
type: 'SET_TASK_SCHEDULE_EXECUTED_AT',
payload: {
id: taskSchedule.id,
executedAt,
} as TaskScheduleExecutedAt,
})
} catch (error) {
if (error === false) return;
console.error('Error happened executing taskSchedule: ', taskSchedule, 'Error: ', error);
this.notificationService.createErrorNotification({
title: 'Error executing task schedule',
body: `Task schedule: ${taskSchedule.name}\nError: ${JSON.stringify(error)}`,
tag: 'ScheduleService-taskExecutor-error',
})
if (!taskSchedule.mailNotify) return;
this.mailNotificationService.sendMail({
to: taskSchedule.mailAddress,
html: `
Task schedule: ${ JSON.stringify(taskSchedule, null, 2) }<br>
Error: ${error.message} ${JSON.stringify(error)}
`,
})
}
}
|
identifier_body
|
schedule.service.ts
|
import {
Injectable,
OnDestroy,
Injector,
} from '@angular/core';
import { Store } from '@ngrx/store';
import { Observable, Subject, Subscription } from 'rxjs';
import { v1 as uuidV1 } from 'uuid';
import * as schedule from 'node-schedule';
import {
UtilService,
WebNotificationService,
MailNotificationService,
} from './';
import { tasksTypes } from '../index';
@Injectable()
export class
|
implements OnDestroy {
public tasks$: Observable<Task[]>;
public scheduleEmitter = new Subject<TaskSchedule>();
public jobs: schedule.Job[] = [];
private scheduleServiceSub: Subscription;
constructor(
public store: Store<RXState>,
public injector: Injector,
public notificationService: WebNotificationService,
public mailNotificationService: MailNotificationService,
) {
// Combine the result of all the observables into the last method.
this.scheduleServiceSub = Observable.combineLatest(
// Get scheduleLists, filter them by active and remove the 'Show all' one.
this.store
.select<Folder[]>('folders')
.map(folders => folders.filter(folder => folder.id !== '' && folder.active))
.map(folders => folders.map(folder => folder.id)),
// Get taskSchedules, filter them by active and remove the 'Show all' one.
this.store
.select<TaskSchedule[]>('taskSchedules')
.map((taskSchedules) => taskSchedules.filter((taskSchedule) => taskSchedule.id !== '' && taskSchedule.active)),
// Filter all taskSchedules that are included in the actives scheduleLists
(folderIds, taskSchedules) => {
return taskSchedules.filter(taskSchedule => folderIds.includes(taskSchedule.folderId))
}
)
.subscribe((taskSchedules) => {
// Cancel all jobs, then schedule new ones with the filtered by combineLatest observable.
this.cancelJobs();
this.scheduleJobs(taskSchedules);
})
// Tasks observable
this.tasks$ = this.store.select<Task[]>('tasks')
// Execute taskSchedule using the 'executeTasks' method.
this.scheduleEmitter.subscribe((taskSchedule) => {
this.executeTasks(taskSchedule);
});
}
public ngOnDestroy() {
this.scheduleServiceSub && this.scheduleServiceSub.unsubscribe();
this.cancelJobs();
}
private cancelJobs() {
console.log('Cancelling jobs');
this.jobs.forEach((job) => job.cancel());
}
// Schedule taskSchedules passed as param
private scheduleJobs(taskSchedules: TaskSchedule[]) {
console.log('Scheduling jobs');
this.jobs = [
...taskSchedules.map((taskSchedule) => {
// Extract recurrence rule from taskSchedule using Util method 'templateStringSingleLine'.
const rule = UtilService.templateStringSingleLine(`
${taskSchedule.second || '*'}
${taskSchedule.minute || '*'}
${taskSchedule.hour || '*'}
${taskSchedule.dayOfMonth || '*'}
${taskSchedule.month || '*'}
${taskSchedule.dayOfWeek || '*'}
`);
const ruleObj = { rule } as schedule.RecurrenceSpecDateRange;
// Add date range if defined in the taskSchedule
if (taskSchedule.useDateRange) {
Object.assign(ruleObj, {
start: taskSchedule.start,
end: taskSchedule.end,
});
}
// Schedule job and emit event when it gets executed with callback
return schedule.scheduleJob(taskSchedule.name, ruleObj, () => {
this.scheduleEmitter.next(taskSchedule);
})
})
].filter(job => job);
}
/**
* Execute active tasks associated to taskSchedule
*
* @param {TaskSchedule} taskSchedule taskSchedule to be executed.
*
* @memberOf ScheduleService
*/
public async executeTasks(taskSchedule: TaskSchedule) {
console.log('Executing TaskSchedule: ', taskSchedule.name);
const tasks = await this.tasks$
.map((tasksArr) =>
tasksArr.filter(task =>
task.taskScheduleId === taskSchedule.id
))
.take(1)
.toPromise()
try {
const taskData = [];
for (const [taskIndex, task] of Array.from(tasks.entries())) {
const taskType = tasksTypes.find(taskTypeItem => taskTypeItem.type === task.type.type)
const taskExecutor = this.injector.get(taskType.executor)
await taskExecutor.executeTask(task, taskData, taskIndex)
}
const executedAt = new Date()
this.store.dispatch({
type: 'SET_TASK_SCHEDULE_EXECUTED_AT',
payload: {
id: taskSchedule.id,
executedAt,
} as TaskScheduleExecutedAt,
})
} catch (error) {
if (error === false) return;
console.error('Error happened executing taskSchedule: ', taskSchedule, 'Error: ', error);
this.notificationService.createErrorNotification({
title: 'Error executing task schedule',
body: `Task schedule: ${taskSchedule.name}\nError: ${JSON.stringify(error)}`,
tag: 'ScheduleService-taskExecutor-error',
})
if (!taskSchedule.mailNotify) return;
this.mailNotificationService.sendMail({
to: taskSchedule.mailAddress,
html: `
Task schedule: ${ JSON.stringify(taskSchedule, null, 2) }<br>
Error: ${error.message} ${JSON.stringify(error)}
`,
})
}
}
}
|
ScheduleService
|
identifier_name
|
count.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
struct A<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end {
let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else {
None::<Self::Item>
}
}
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
#[test]
fn count_test1() {
let a: A<T> = A { begin: 0, end: 10 };
let count: usize = a.count();
assert_eq!(count, 10);
}
#[test]
fn
|
() {
let mut a: A<T> = A { begin: 0, end: 10 };
assert_eq!(a.next(), Some::<T>(0));
let count: usize = a.count();
assert_eq!(count, 9);
}
}
|
count_test2
|
identifier_name
|
count.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
struct A<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end {
|
None::<Self::Item>
}
}
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
#[test]
fn count_test1() {
let a: A<T> = A { begin: 0, end: 10 };
let count: usize = a.count();
assert_eq!(count, 10);
}
#[test]
fn count_test2() {
let mut a: A<T> = A { begin: 0, end: 10 };
assert_eq!(a.next(), Some::<T>(0));
let count: usize = a.count();
assert_eq!(count, 9);
}
}
|
let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else {
|
random_line_split
|
global.js
|
/**
* Created by LPAC006013 on 23/11/14.
*/
/*
* wiring Super fish to menu
*/
var sfvar = jQuery('div.menu');
var phoneSize = 600;
jQuery(document).ready(function($) {
//if screen size is bigger than phone's screen (Tablet,Desktop)
if($(document).width() >= phoneSize) {
// enable superfish
sfvar.superfish({
delay: 500,
speed: 'slow'
});
jQuery("#menu-main-menu").addClass('clear');
var containerheight = jQuery("#menu-main-menu").height();
jQuery("#menu-main-menu").children().css("height",containerheight);
}
$(window).resize(function() {
if($(document).width() >= phoneSize && !sfvar.hasClass('sf-js-enabled')) {
sfvar.superfish({
delay: 500,
speed: 'slow'
});
}
// phoneSize, disable superfish
else if($(document).width() < phoneSize) {
sfvar.superfish('destroy');
}
});
|
});
|
random_line_split
|
|
global.js
|
/**
* Created by LPAC006013 on 23/11/14.
*/
/*
* wiring Super fish to menu
*/
var sfvar = jQuery('div.menu');
var phoneSize = 600;
jQuery(document).ready(function($) {
//if screen size is bigger than phone's screen (Tablet,Desktop)
if($(document).width() >= phoneSize)
|
$(window).resize(function() {
if($(document).width() >= phoneSize && !sfvar.hasClass('sf-js-enabled')) {
sfvar.superfish({
delay: 500,
speed: 'slow'
});
}
// phoneSize, disable superfish
else if($(document).width() < phoneSize) {
sfvar.superfish('destroy');
}
});
});
|
{
// enable superfish
sfvar.superfish({
delay: 500,
speed: 'slow'
});
jQuery("#menu-main-menu").addClass('clear');
var containerheight = jQuery("#menu-main-menu").height();
jQuery("#menu-main-menu").children().css("height",containerheight);
}
|
conditional_block
|
main.rs
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kaze::*;
use rand::Rng;
use structopt::StructOpt;
/// This tool generates lock-style state machines that output an `unlocked` flag
/// after receiving a specific sequence of input symbols. It can configurably
/// generate the length of the unlock sequence, the width of the interface, and
/// the probability of it inserting backtracking transitions.
#[derive(StructOpt)]
struct Options {
/// The number of states between the initial and unlocked state.
#[structopt(long, default_value = "32")]
states: u32,
/// The width of the registers and ports making up the lock.
#[structopt(long, default_value = "32")]
width: u32,
}
fn main() -> std::io::Result<()> {
let options = Options::from_args();
let generator = Generator {
states: options.states,
width: options.width,
};
let mut context = Context::new();
let lock = generator.generate(&mut context);
verilog::generate(&lock, std::io::stdout())
|
states: u32,
width: u32,
}
const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
fn log_2(x: u32) -> u32 {
num_bits::<u32>() as u32 - x.leading_zeros()
}
impl Generator {
fn generate<'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module {
let mut rng = rand::thread_rng();
// compute width of state register
let state_reg_width = log_2(self.states - 1u32);
// create lock module with a single state register and trigger input
let lock = c.module("lock");
let input = lock.input("code", self.width);
let state = lock.reg("state", state_reg_width);
state.default_value(0u32);
// define lock state transitions
let mut next = state.value;
for i in 0..(self.states - 1u32) {
let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width));
let from = lock.lit(i, state_reg_width);
let to = lock.lit(i + 1u32, state_reg_width);
let trigger = lock.lit(trigger_value, self.width);
next = (state.value.eq(from) & input.eq(trigger)).mux(to, next);
}
state.drive_next(next);
// define lock outputs
lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width)));
lock.output("state", state.value);
// return HDL
lock
}
}
|
}
struct Generator {
|
random_line_split
|
main.rs
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kaze::*;
use rand::Rng;
use structopt::StructOpt;
/// This tool generates lock-style state machines that output an `unlocked` flag
/// after receiving a specific sequence of input symbols. It can configurably
/// generate the length of the unlock sequence, the width of the interface, and
/// the probability of it inserting backtracking transitions.
#[derive(StructOpt)]
struct Options {
/// The number of states between the initial and unlocked state.
#[structopt(long, default_value = "32")]
states: u32,
/// The width of the registers and ports making up the lock.
#[structopt(long, default_value = "32")]
width: u32,
}
fn main() -> std::io::Result<()> {
let options = Options::from_args();
let generator = Generator {
states: options.states,
width: options.width,
};
let mut context = Context::new();
let lock = generator.generate(&mut context);
verilog::generate(&lock, std::io::stdout())
}
struct Generator {
states: u32,
width: u32,
}
const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
fn log_2(x: u32) -> u32 {
num_bits::<u32>() as u32 - x.leading_zeros()
}
impl Generator {
fn
|
<'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module {
let mut rng = rand::thread_rng();
// compute width of state register
let state_reg_width = log_2(self.states - 1u32);
// create lock module with a single state register and trigger input
let lock = c.module("lock");
let input = lock.input("code", self.width);
let state = lock.reg("state", state_reg_width);
state.default_value(0u32);
// define lock state transitions
let mut next = state.value;
for i in 0..(self.states - 1u32) {
let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width));
let from = lock.lit(i, state_reg_width);
let to = lock.lit(i + 1u32, state_reg_width);
let trigger = lock.lit(trigger_value, self.width);
next = (state.value.eq(from) & input.eq(trigger)).mux(to, next);
}
state.drive_next(next);
// define lock outputs
lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width)));
lock.output("state", state.value);
// return HDL
lock
}
}
|
generate
|
identifier_name
|
main.rs
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kaze::*;
use rand::Rng;
use structopt::StructOpt;
/// This tool generates lock-style state machines that output an `unlocked` flag
/// after receiving a specific sequence of input symbols. It can configurably
/// generate the length of the unlock sequence, the width of the interface, and
/// the probability of it inserting backtracking transitions.
#[derive(StructOpt)]
struct Options {
/// The number of states between the initial and unlocked state.
#[structopt(long, default_value = "32")]
states: u32,
/// The width of the registers and ports making up the lock.
#[structopt(long, default_value = "32")]
width: u32,
}
fn main() -> std::io::Result<()> {
let options = Options::from_args();
let generator = Generator {
states: options.states,
width: options.width,
};
let mut context = Context::new();
let lock = generator.generate(&mut context);
verilog::generate(&lock, std::io::stdout())
}
struct Generator {
states: u32,
width: u32,
}
const fn num_bits<T>() -> usize
|
fn log_2(x: u32) -> u32 {
num_bits::<u32>() as u32 - x.leading_zeros()
}
impl Generator {
fn generate<'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module {
let mut rng = rand::thread_rng();
// compute width of state register
let state_reg_width = log_2(self.states - 1u32);
// create lock module with a single state register and trigger input
let lock = c.module("lock");
let input = lock.input("code", self.width);
let state = lock.reg("state", state_reg_width);
state.default_value(0u32);
// define lock state transitions
let mut next = state.value;
for i in 0..(self.states - 1u32) {
let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width));
let from = lock.lit(i, state_reg_width);
let to = lock.lit(i + 1u32, state_reg_width);
let trigger = lock.lit(trigger_value, self.width);
next = (state.value.eq(from) & input.eq(trigger)).mux(to, next);
}
state.drive_next(next);
// define lock outputs
lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width)));
lock.output("state", state.value);
// return HDL
lock
}
}
|
{ std::mem::size_of::<T>() * 8 }
|
identifier_body
|
settings.py
|
# coding:utf-8
"""
Django settings for turbo project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import datetime
import os
import turbosettings.parameters as parameters
from turbosettings.generate_secret_key import secret_key_from_file
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
USE_X_FORWARDED_HOST = False
FORCE_SCRIPT_NAME = ""
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secret_key_from_file('secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'songwriter',
'corsheaders',
'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'turbosettings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': parameters.TEMPLATES_DIRS if parameters.TEMPLATES_DIRS else [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
'builtins': [
'django.templatetags.i18n',
'django.contrib.humanize.templatetags.humanize',
'django.contrib.staticfiles.templatetags.staticfiles',
],
},
},
]
WSGI_APPLICATION = 'turbosettings.wsgi.application'
CORS_ORIGIN_WHITELIST = [
'localhost:8080',
'127.0.0.1:8080',
]
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = "Europe/Paris"
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda x: x
LANGUAGES = (
('fr', gettext('Français')),
('en', gettext('English')),
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = FORCE_SCRIPT_NAME + "/static/"
STATIC_ROOT = BASE_DIR + '/static/'
STATICFILES_DIRS = parameters.STATICFILES_DIRS if parameters.STATICFILES_DIRS else (
"assets/",
)
FIXTURE_DIRS = (
'fixtures/'
)
|
MEDIA_URL = '/'
MEDIA_ROOT = BASE_DIR + '/media/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
JWT_AUTH = {
'JWT_SECRET_KEY': secret_key_from_file('secret_key_jwt'),
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=18000),
}
# For debug toolbar
INTERNAL_IPS = ["127.0.0.1"]
from turbosettings.settings_local import *
|
random_line_split
|
|
slab.rs
|
use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
|
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn oom(&mut self, error: AllocErr) -> ! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
|
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
|
random_line_split
|
slab.rs
|
use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn
|
(&mut self, error: AllocErr) -> ! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
|
oom
|
identifier_name
|
slab.rs
|
use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn oom(&mut self, error: AllocErr) -> !
|
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
|
{
panic!("Out of memory: {:?}", error);
}
|
identifier_body
|
slab.rs
|
use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else
|
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn oom(&mut self, error: AllocErr) -> ! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
|
{
panic!("__rust_allocate: heap not initialized");
}
|
conditional_block
|
ui_highlowDialog.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\JohnnyG\Documents\XRDproject_Python_11June2010Release backup\highlowDialog.ui'
#
# Created: Mon Jun 14 16:20:37 2010
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_highlowDialog(object):
def setupUi(self, highlowDialog):
highlowDialog.setObjectName("highlowDialog")
highlowDialog.resize(352, 128)
self.buttonBox = QtGui.QDialogButtonBox(highlowDialog)
self.buttonBox.setGeometry(QtCore.QRect(0, 70, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.lowSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.lowSpinBox.setGeometry(QtCore.QRect(20, 40, 62, 22))
self.lowSpinBox.setMinimum(-1000000.0)
self.lowSpinBox.setMaximum(1000000.0)
self.lowSpinBox.setObjectName("lowSpinBox")
self.highSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.highSpinBox.setGeometry(QtCore.QRect(100, 40, 62, 20))
self.highSpinBox.setMinimum(-1000000.0)
self.highSpinBox.setMaximum(1000000.0)
self.highSpinBox.setObjectName("highSpinBox")
self.label = QtGui.QLabel(highlowDialog)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 16))
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(highlowDialog)
self.label_2.setGeometry(QtCore.QRect(100, 20, 76, 16))
self.label_2.setObjectName("label_2")
self.retranslateUi(highlowDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), highlowDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), highlowDialog.reject)
QtCore.QMetaObject.connectSlotsByName(highlowDialog)
def retranslateUi(self, highlowDialog):
|
highlowDialog.setWindowTitle(QtGui.QApplication.translate("highlowDialog", "Enter range for colorbar", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("highlowDialog", "low value", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("highlowDialog", "high value", None, QtGui.QApplication.UnicodeUTF8))
|
identifier_body
|
|
ui_highlowDialog.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\JohnnyG\Documents\XRDproject_Python_11June2010Release backup\highlowDialog.ui'
#
# Created: Mon Jun 14 16:20:37 2010
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_highlowDialog(object):
def setupUi(self, highlowDialog):
highlowDialog.setObjectName("highlowDialog")
highlowDialog.resize(352, 128)
self.buttonBox = QtGui.QDialogButtonBox(highlowDialog)
self.buttonBox.setGeometry(QtCore.QRect(0, 70, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.lowSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.lowSpinBox.setGeometry(QtCore.QRect(20, 40, 62, 22))
self.lowSpinBox.setMinimum(-1000000.0)
self.lowSpinBox.setMaximum(1000000.0)
self.lowSpinBox.setObjectName("lowSpinBox")
self.highSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.highSpinBox.setGeometry(QtCore.QRect(100, 40, 62, 20))
self.highSpinBox.setMinimum(-1000000.0)
self.highSpinBox.setMaximum(1000000.0)
self.highSpinBox.setObjectName("highSpinBox")
self.label = QtGui.QLabel(highlowDialog)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 16))
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(highlowDialog)
self.label_2.setGeometry(QtCore.QRect(100, 20, 76, 16))
self.label_2.setObjectName("label_2")
self.retranslateUi(highlowDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), highlowDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), highlowDialog.reject)
QtCore.QMetaObject.connectSlotsByName(highlowDialog)
def
|
(self, highlowDialog):
highlowDialog.setWindowTitle(QtGui.QApplication.translate("highlowDialog", "Enter range for colorbar", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("highlowDialog", "low value", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("highlowDialog", "high value", None, QtGui.QApplication.UnicodeUTF8))
|
retranslateUi
|
identifier_name
|
ui_highlowDialog.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\JohnnyG\Documents\XRDproject_Python_11June2010Release backup\highlowDialog.ui'
#
# Created: Mon Jun 14 16:20:37 2010
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_highlowDialog(object):
def setupUi(self, highlowDialog):
highlowDialog.setObjectName("highlowDialog")
highlowDialog.resize(352, 128)
self.buttonBox = QtGui.QDialogButtonBox(highlowDialog)
self.buttonBox.setGeometry(QtCore.QRect(0, 70, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.lowSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.lowSpinBox.setGeometry(QtCore.QRect(20, 40, 62, 22))
self.lowSpinBox.setMinimum(-1000000.0)
self.lowSpinBox.setMaximum(1000000.0)
self.lowSpinBox.setObjectName("lowSpinBox")
self.highSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.highSpinBox.setGeometry(QtCore.QRect(100, 40, 62, 20))
self.highSpinBox.setMinimum(-1000000.0)
self.highSpinBox.setMaximum(1000000.0)
self.highSpinBox.setObjectName("highSpinBox")
self.label = QtGui.QLabel(highlowDialog)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 16))
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(highlowDialog)
self.label_2.setGeometry(QtCore.QRect(100, 20, 76, 16))
self.label_2.setObjectName("label_2")
self.retranslateUi(highlowDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), highlowDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), highlowDialog.reject)
QtCore.QMetaObject.connectSlotsByName(highlowDialog)
def retranslateUi(self, highlowDialog):
highlowDialog.setWindowTitle(QtGui.QApplication.translate("highlowDialog", "Enter range for colorbar", None, QtGui.QApplication.UnicodeUTF8))
|
self.label_2.setText(QtGui.QApplication.translate("highlowDialog", "high value", None, QtGui.QApplication.UnicodeUTF8))
|
self.label.setText(QtGui.QApplication.translate("highlowDialog", "low value", None, QtGui.QApplication.UnicodeUTF8))
|
random_line_split
|
SetPhotoLocation.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SetPhotoLocation
# Sets the geo data (including latitude and longitude) for a specified photo.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SetPhotoLocation(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SetPhotoLocation Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SetPhotoLocation, self).__init__(temboo_session, '/Library/Flickr/Geo/SetPhotoLocation')
def new_input_set(self):
return SetPhotoLocationInputSet()
def _make_result_set(self, result, path):
return SetPhotoLocationResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SetPhotoLocationChoreographyExecution(session, exec_id, path)
class SetPhotoLocationInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SetPhotoLocation
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((required, string) The API Secret provided by Flickr (AKA the OAuth Consumer Secret).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APISecret', value)
def
|
(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessToken', value)
def set_Accuracy(self, value):
"""
Set the value of the Accuracy input for this Choreo. ((optional, integer) Recorded accuracy level of the location information. Current range is 1-16. Defaults to 16 if not specified.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Accuracy', value)
def set_Context(self, value):
"""
Set the value of the Context input for this Choreo. ((optional, string) A numeric value representing the photo's location beyond latitude and longitude. For example, you can indicate that a photo was taken "indoors" or "outdoors". Set to 1 for indoors or 2 for outdoors.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Context', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude whose valid range is -90 to 90. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude whose valid range is -180 to 180. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Longitude', value)
def set_PhotoID(self, value):
"""
Set the value of the PhotoID input for this Choreo. ((required, integer) The id of the photo to set location data for.)
"""
super(SetPhotoLocationInputSet, self)._set_input('PhotoID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
"""
super(SetPhotoLocationInputSet, self)._set_input('ResponseFormat', value)
class SetPhotoLocationResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SetPhotoLocation Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
"""
return self._output.get('Response', None)
class SetPhotoLocationChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SetPhotoLocationResultSet(response, path)
|
set_AccessTokenSecret
|
identifier_name
|
SetPhotoLocation.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SetPhotoLocation
# Sets the geo data (including latitude and longitude) for a specified photo.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SetPhotoLocation(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SetPhotoLocation Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SetPhotoLocation, self).__init__(temboo_session, '/Library/Flickr/Geo/SetPhotoLocation')
def new_input_set(self):
return SetPhotoLocationInputSet()
def _make_result_set(self, result, path):
return SetPhotoLocationResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SetPhotoLocationChoreographyExecution(session, exec_id, path)
class SetPhotoLocationInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SetPhotoLocation
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((required, string) The API Secret provided by Flickr (AKA the OAuth Consumer Secret).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APISecret', value)
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessToken', value)
def set_Accuracy(self, value):
"""
Set the value of the Accuracy input for this Choreo. ((optional, integer) Recorded accuracy level of the location information. Current range is 1-16. Defaults to 16 if not specified.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Accuracy', value)
def set_Context(self, value):
"""
Set the value of the Context input for this Choreo. ((optional, string) A numeric value representing the photo's location beyond latitude and longitude. For example, you can indicate that a photo was taken "indoors" or "outdoors". Set to 1 for indoors or 2 for outdoors.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Context', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude whose valid range is -90 to 90. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude whose valid range is -180 to 180. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Longitude', value)
def set_PhotoID(self, value):
"""
Set the value of the PhotoID input for this Choreo. ((required, integer) The id of the photo to set location data for.)
"""
super(SetPhotoLocationInputSet, self)._set_input('PhotoID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
"""
super(SetPhotoLocationInputSet, self)._set_input('ResponseFormat', value)
class SetPhotoLocationResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SetPhotoLocation Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
|
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
"""
return self._output.get('Response', None)
class SetPhotoLocationChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SetPhotoLocationResultSet(response, path)
|
return json.loads(str)
|
identifier_body
|
SetPhotoLocation.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SetPhotoLocation
# Sets the geo data (including latitude and longitude) for a specified photo.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SetPhotoLocation(Choreography):
|
def __init__(self, temboo_session):
"""
Create a new instance of the SetPhotoLocation Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SetPhotoLocation, self).__init__(temboo_session, '/Library/Flickr/Geo/SetPhotoLocation')
def new_input_set(self):
return SetPhotoLocationInputSet()
def _make_result_set(self, result, path):
return SetPhotoLocationResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SetPhotoLocationChoreographyExecution(session, exec_id, path)
class SetPhotoLocationInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SetPhotoLocation
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((required, string) The API Secret provided by Flickr (AKA the OAuth Consumer Secret).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APISecret', value)
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessToken', value)
def set_Accuracy(self, value):
"""
Set the value of the Accuracy input for this Choreo. ((optional, integer) Recorded accuracy level of the location information. Current range is 1-16. Defaults to 16 if not specified.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Accuracy', value)
def set_Context(self, value):
"""
Set the value of the Context input for this Choreo. ((optional, string) A numeric value representing the photo's location beyond latitude and longitude. For example, you can indicate that a photo was taken "indoors" or "outdoors". Set to 1 for indoors or 2 for outdoors.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Context', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude whose valid range is -90 to 90. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude whose valid range is -180 to 180. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Longitude', value)
def set_PhotoID(self, value):
"""
Set the value of the PhotoID input for this Choreo. ((required, integer) The id of the photo to set location data for.)
"""
super(SetPhotoLocationInputSet, self)._set_input('PhotoID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
"""
super(SetPhotoLocationInputSet, self)._set_input('ResponseFormat', value)
class SetPhotoLocationResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SetPhotoLocation Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
"""
return self._output.get('Response', None)
class SetPhotoLocationChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SetPhotoLocationResultSet(response, path)
|
random_line_split
|
|
util.js
|
// @flow
import Decimal from "decimal.js-light"
import * as R from "ramda"
import _ from "lodash"
import moment from "moment"
import { CS_DEFAULT, CS_ERROR_MESSAGES, ORDER_FULFILLED } from "../constants"
import type { BootcampRun } from "../flow/bootcampTypes"
import type { HttpResponse } from "../flow/httpTypes"
/**
* Creates a POST form with hidden input fields
* @param url the url for the form action
* @param payload Each key value pair will become an input field
*/
export function createForm(url: string, payload: Object): HTMLFormElement {
const form = document.createElement("form")
form.setAttribute("action", url)
form.setAttribute("method", "post")
form.setAttribute("class", "cybersource-payload")
for (const key: string of Object.keys(payload)) {
const value = payload[key]
const input = document.createElement("input")
input.setAttribute("name", key)
input.setAttribute("value", value)
input.setAttribute("type", "hidden")
form.appendChild(input)
}
return form
}
export const isNilOrBlank = R.either(R.isNil, R.isEmpty)
export const formatDollarAmount = (amount: ?number): string => {
amount = amount || 0
const formattedAmount = amount.toLocaleString("en-US", {
style: "currency",
currency: "USD",
minimumFractionDigits: 2,
maximumFractionDigits: 2
})
return formattedAmount.endsWith(".00") ?
formattedAmount.substring(0, formattedAmount.length - 3) :
formattedAmount
}
export const formatReadableDate = (datetime: moment$Moment): string =>
datetime.format("MMM D, YYYY")
export const formatReadableDateFromStr = (datetimeString: string): string =>
formatReadableDate(moment(datetimeString))
export const formatStartEndDateStrings = (
startDtString: ?string,
endDtString: ?string
): string => {
let formattedStart, formattedEnd
if (startDtString) {
formattedStart = formatReadableDateFromStr(startDtString)
}
if (endDtString) {
formattedEnd = formatReadableDateFromStr(endDtString)
}
if (!formattedStart && !formattedEnd) {
return ""
} else if (!formattedStart) {
// $FlowFixMe: This cannot be un-initialized
return `Ends ${formattedEnd}`
} else if (!formattedEnd) {
// $FlowFixMe: This cannot be un-initialized
return `Starts ${formattedStart}`
} else {
return `${formattedStart} - ${formattedEnd}`
}
}
export const getRunWithFulfilledOrder = (
runData: ?Array<Object>,
orderId: number
) =>
R.find(
bootcampRun =>
R.any(
payment =>
payment.order.status === ORDER_FULFILLED &&
payment.order.id === orderId,
bootcampRun.payments
),
runData
)
export const getInstallmentDeadlineDates = R.map(
R.compose(moment, R.prop("deadline"))
)
export function* incrementer(): Generator<number, *, *> {
let int = 1
// eslint-disable-next-line no-constant-condition
while (true) {
yield int++
}
}
export const formatTitle = (text: string) => `MIT Bootcamps | ${text}`
export const newSetWith = (set: Set<*>, valueToAdd: any): Set<*> => {
const newSet = new Set(set)
newSet.add(valueToAdd)
return newSet
}
export const newSetWithout = (set: Set<*>, valueToDelete: any): Set<*> => {
const newSet = new Set(set)
newSet.delete(valueToDelete)
return newSet
}
export const formatPrice = (price: ?string | number | Decimal): string => {
if (price === null || price === undefined) {
return ""
} else {
let decimalPrice: Decimal = Decimal(price).toDecimalPlaces(2)
let formattedPrice
const isNegative = decimalPrice.isNegative()
if (isNegative) {
decimalPrice = decimalPrice.times(-1)
}
if (decimalPrice.isInteger()) {
formattedPrice = decimalPrice.toFixed(0)
} else {
formattedPrice = decimalPrice.toFixed(2, Decimal.ROUND_HALF_UP)
}
return `${isNegative ? "-" : ""}$${formattedPrice}`
}
}
export const getFilenameFromPath = (url: string) =>
url.substring(url.lastIndexOf("/") + 1)
/*
* Our uploaded filenames begin with a media path. Until we start saving the
* raw file names for uploaded files, this utility function can be used to
* extract the file name.
* Ex: "media/1/abcde-12345_some_resume.pdf" -> "some_resume.pdf"
*/
export const getFilenameFromMediaPath = R.compose(
R.join("_"),
R.tail(),
R.split("_"),
R.defaultTo("")
)
export const isErrorStatusCode = (statusCode: number): boolean =>
statusCode >= 400
export const isErrorResponse = (response: HttpResponse<*>): boolean =>
isErrorStatusCode(response.status)
export const getResponseBodyErrors = (
response: HttpResponse<*>
): string | Array<string> | null => {
if (!response || !response.body || !response.body.errors) {
return null
}
|
(Array.isArray(response.body.errors)) {
return response.body.errors.length === 0 ? null : response.body.errors
}
return response.body.errors === "" ? null : response.body.errors
}
export const getFirstResponseBodyError = (
response: HttpResponse<*>
): ?string => {
const errors = getResponseBodyErrors(response)
if (!Array.isArray(errors)) {
return errors
}
return errors.length === 0 ? null : errors[0]
}
export const getXhrResponseError = (response: Object): ?string => {
if (_.isString(response)) {
try {
response = JSON.parse(response)
} catch (e) {
return null
}
}
if (!_.isObject(response)) {
return null
}
if (_.isArray(response) && response.length > 0) {
return response[0]
}
if (response.errors && response.errors.length > 0) {
return response.errors[0]
}
if (response.error && response.error !== "") {
return response.error
}
return null
}
export const parsePrice = (priceStr: string | number): Decimal => {
let price
try {
price = new Decimal(priceStr)
} catch (e) {
return null
}
return price.toDecimalPlaces(2)
}
export const formatRunDateRange = (run: BootcampRun) =>
`${run.start_date ? formatReadableDateFromStr(run.start_date) : "TBD"} - ${
run.end_date ? formatReadableDateFromStr(run.end_date) : "TBD"
}`
export const recoverableErrorCode = (error: string) =>
error ? error.match(/(CS_101|CS_102)/g) : null
export const transformError = (error: string) =>
CS_ERROR_MESSAGES[recoverableErrorCode(error) || CS_DEFAULT]
export const isLocalStorageSupported = () => {
try {
const key = "__local_storage_access_key__"
window.localStorage.setItem(key, key)
window.localStorage.getItem(key)
return true
} catch (e) {
return false
}
}
export const createNovoEdLinkUrl = (baseUrl: string, stub: string): string => {
return `${baseUrl}/#!/courses/${stub}/home`
}
|
if
|
identifier_name
|
util.js
|
// @flow
import Decimal from "decimal.js-light"
import * as R from "ramda"
import _ from "lodash"
import moment from "moment"
import { CS_DEFAULT, CS_ERROR_MESSAGES, ORDER_FULFILLED } from "../constants"
import type { BootcampRun } from "../flow/bootcampTypes"
import type { HttpResponse } from "../flow/httpTypes"
/**
* Creates a POST form with hidden input fields
* @param url the url for the form action
* @param payload Each key value pair will become an input field
*/
export function createForm(url: string, payload: Object): HTMLFormElement
|
export const isNilOrBlank = R.either(R.isNil, R.isEmpty)
export const formatDollarAmount = (amount: ?number): string => {
amount = amount || 0
const formattedAmount = amount.toLocaleString("en-US", {
style: "currency",
currency: "USD",
minimumFractionDigits: 2,
maximumFractionDigits: 2
})
return formattedAmount.endsWith(".00") ?
formattedAmount.substring(0, formattedAmount.length - 3) :
formattedAmount
}
export const formatReadableDate = (datetime: moment$Moment): string =>
datetime.format("MMM D, YYYY")
export const formatReadableDateFromStr = (datetimeString: string): string =>
formatReadableDate(moment(datetimeString))
export const formatStartEndDateStrings = (
startDtString: ?string,
endDtString: ?string
): string => {
let formattedStart, formattedEnd
if (startDtString) {
formattedStart = formatReadableDateFromStr(startDtString)
}
if (endDtString) {
formattedEnd = formatReadableDateFromStr(endDtString)
}
if (!formattedStart && !formattedEnd) {
return ""
} else if (!formattedStart) {
// $FlowFixMe: This cannot be un-initialized
return `Ends ${formattedEnd}`
} else if (!formattedEnd) {
// $FlowFixMe: This cannot be un-initialized
return `Starts ${formattedStart}`
} else {
return `${formattedStart} - ${formattedEnd}`
}
}
export const getRunWithFulfilledOrder = (
runData: ?Array<Object>,
orderId: number
) =>
R.find(
bootcampRun =>
R.any(
payment =>
payment.order.status === ORDER_FULFILLED &&
payment.order.id === orderId,
bootcampRun.payments
),
runData
)
export const getInstallmentDeadlineDates = R.map(
R.compose(moment, R.prop("deadline"))
)
export function* incrementer(): Generator<number, *, *> {
let int = 1
// eslint-disable-next-line no-constant-condition
while (true) {
yield int++
}
}
export const formatTitle = (text: string) => `MIT Bootcamps | ${text}`
export const newSetWith = (set: Set<*>, valueToAdd: any): Set<*> => {
const newSet = new Set(set)
newSet.add(valueToAdd)
return newSet
}
export const newSetWithout = (set: Set<*>, valueToDelete: any): Set<*> => {
const newSet = new Set(set)
newSet.delete(valueToDelete)
return newSet
}
export const formatPrice = (price: ?string | number | Decimal): string => {
if (price === null || price === undefined) {
return ""
} else {
let decimalPrice: Decimal = Decimal(price).toDecimalPlaces(2)
let formattedPrice
const isNegative = decimalPrice.isNegative()
if (isNegative) {
decimalPrice = decimalPrice.times(-1)
}
if (decimalPrice.isInteger()) {
formattedPrice = decimalPrice.toFixed(0)
} else {
formattedPrice = decimalPrice.toFixed(2, Decimal.ROUND_HALF_UP)
}
return `${isNegative ? "-" : ""}$${formattedPrice}`
}
}
export const getFilenameFromPath = (url: string) =>
url.substring(url.lastIndexOf("/") + 1)
/*
* Our uploaded filenames begin with a media path. Until we start saving the
* raw file names for uploaded files, this utility function can be used to
* extract the file name.
* Ex: "media/1/abcde-12345_some_resume.pdf" -> "some_resume.pdf"
*/
export const getFilenameFromMediaPath = R.compose(
R.join("_"),
R.tail(),
R.split("_"),
R.defaultTo("")
)
export const isErrorStatusCode = (statusCode: number): boolean =>
statusCode >= 400
export const isErrorResponse = (response: HttpResponse<*>): boolean =>
isErrorStatusCode(response.status)
export const getResponseBodyErrors = (
response: HttpResponse<*>
): string | Array<string> | null => {
if (!response || !response.body || !response.body.errors) {
return null
}
if (Array.isArray(response.body.errors)) {
return response.body.errors.length === 0 ? null : response.body.errors
}
return response.body.errors === "" ? null : response.body.errors
}
export const getFirstResponseBodyError = (
response: HttpResponse<*>
): ?string => {
const errors = getResponseBodyErrors(response)
if (!Array.isArray(errors)) {
return errors
}
return errors.length === 0 ? null : errors[0]
}
export const getXhrResponseError = (response: Object): ?string => {
if (_.isString(response)) {
try {
response = JSON.parse(response)
} catch (e) {
return null
}
}
if (!_.isObject(response)) {
return null
}
if (_.isArray(response) && response.length > 0) {
return response[0]
}
if (response.errors && response.errors.length > 0) {
return response.errors[0]
}
if (response.error && response.error !== "") {
return response.error
}
return null
}
export const parsePrice = (priceStr: string | number): Decimal => {
let price
try {
price = new Decimal(priceStr)
} catch (e) {
return null
}
return price.toDecimalPlaces(2)
}
export const formatRunDateRange = (run: BootcampRun) =>
`${run.start_date ? formatReadableDateFromStr(run.start_date) : "TBD"} - ${
run.end_date ? formatReadableDateFromStr(run.end_date) : "TBD"
}`
export const recoverableErrorCode = (error: string) =>
error ? error.match(/(CS_101|CS_102)/g) : null
export const transformError = (error: string) =>
CS_ERROR_MESSAGES[recoverableErrorCode(error) || CS_DEFAULT]
export const isLocalStorageSupported = () => {
try {
const key = "__local_storage_access_key__"
window.localStorage.setItem(key, key)
window.localStorage.getItem(key)
return true
} catch (e) {
return false
}
}
export const createNovoEdLinkUrl = (baseUrl: string, stub: string): string => {
return `${baseUrl}/#!/courses/${stub}/home`
}
|
{
const form = document.createElement("form")
form.setAttribute("action", url)
form.setAttribute("method", "post")
form.setAttribute("class", "cybersource-payload")
for (const key: string of Object.keys(payload)) {
const value = payload[key]
const input = document.createElement("input")
input.setAttribute("name", key)
input.setAttribute("value", value)
input.setAttribute("type", "hidden")
form.appendChild(input)
}
return form
}
|
identifier_body
|
util.js
|
// @flow
import Decimal from "decimal.js-light"
import * as R from "ramda"
import _ from "lodash"
import moment from "moment"
import { CS_DEFAULT, CS_ERROR_MESSAGES, ORDER_FULFILLED } from "../constants"
import type { BootcampRun } from "../flow/bootcampTypes"
import type { HttpResponse } from "../flow/httpTypes"
/**
* Creates a POST form with hidden input fields
* @param url the url for the form action
* @param payload Each key value pair will become an input field
*/
export function createForm(url: string, payload: Object): HTMLFormElement {
const form = document.createElement("form")
form.setAttribute("action", url)
form.setAttribute("method", "post")
form.setAttribute("class", "cybersource-payload")
for (const key: string of Object.keys(payload)) {
const value = payload[key]
const input = document.createElement("input")
input.setAttribute("name", key)
input.setAttribute("value", value)
input.setAttribute("type", "hidden")
form.appendChild(input)
}
return form
}
export const isNilOrBlank = R.either(R.isNil, R.isEmpty)
export const formatDollarAmount = (amount: ?number): string => {
amount = amount || 0
const formattedAmount = amount.toLocaleString("en-US", {
style: "currency",
currency: "USD",
minimumFractionDigits: 2,
maximumFractionDigits: 2
})
return formattedAmount.endsWith(".00") ?
formattedAmount.substring(0, formattedAmount.length - 3) :
formattedAmount
}
export const formatReadableDate = (datetime: moment$Moment): string =>
datetime.format("MMM D, YYYY")
export const formatReadableDateFromStr = (datetimeString: string): string =>
formatReadableDate(moment(datetimeString))
export const formatStartEndDateStrings = (
startDtString: ?string,
endDtString: ?string
): string => {
let formattedStart, formattedEnd
if (startDtString) {
formattedStart = formatReadableDateFromStr(startDtString)
}
if (endDtString) {
formattedEnd = formatReadableDateFromStr(endDtString)
}
if (!formattedStart && !formattedEnd) {
return ""
} else if (!formattedStart) {
// $FlowFixMe: This cannot be un-initialized
return `Ends ${formattedEnd}`
} else if (!formattedEnd) {
// $FlowFixMe: This cannot be un-initialized
return `Starts ${formattedStart}`
} else {
return `${formattedStart} - ${formattedEnd}`
}
}
export const getRunWithFulfilledOrder = (
runData: ?Array<Object>,
orderId: number
) =>
R.find(
bootcampRun =>
R.any(
payment =>
payment.order.status === ORDER_FULFILLED &&
payment.order.id === orderId,
bootcampRun.payments
),
runData
)
export const getInstallmentDeadlineDates = R.map(
R.compose(moment, R.prop("deadline"))
)
export function* incrementer(): Generator<number, *, *> {
let int = 1
// eslint-disable-next-line no-constant-condition
while (true) {
yield int++
}
}
export const formatTitle = (text: string) => `MIT Bootcamps | ${text}`
export const newSetWith = (set: Set<*>, valueToAdd: any): Set<*> => {
const newSet = new Set(set)
newSet.add(valueToAdd)
return newSet
}
export const newSetWithout = (set: Set<*>, valueToDelete: any): Set<*> => {
const newSet = new Set(set)
newSet.delete(valueToDelete)
return newSet
}
export const formatPrice = (price: ?string | number | Decimal): string => {
if (price === null || price === undefined) {
return ""
} else {
let decimalPrice: Decimal = Decimal(price).toDecimalPlaces(2)
let formattedPrice
const isNegative = decimalPrice.isNegative()
if (isNegative) {
decimalPrice = decimalPrice.times(-1)
}
if (decimalPrice.isInteger()) {
formattedPrice = decimalPrice.toFixed(0)
} else {
formattedPrice = decimalPrice.toFixed(2, Decimal.ROUND_HALF_UP)
}
return `${isNegative ? "-" : ""}$${formattedPrice}`
}
}
export const getFilenameFromPath = (url: string) =>
url.substring(url.lastIndexOf("/") + 1)
|
* Our uploaded filenames begin with a media path. Until we start saving the
* raw file names for uploaded files, this utility function can be used to
* extract the file name.
* Ex: "media/1/abcde-12345_some_resume.pdf" -> "some_resume.pdf"
*/
export const getFilenameFromMediaPath = R.compose(
R.join("_"),
R.tail(),
R.split("_"),
R.defaultTo("")
)
export const isErrorStatusCode = (statusCode: number): boolean =>
statusCode >= 400
export const isErrorResponse = (response: HttpResponse<*>): boolean =>
isErrorStatusCode(response.status)
export const getResponseBodyErrors = (
response: HttpResponse<*>
): string | Array<string> | null => {
if (!response || !response.body || !response.body.errors) {
return null
}
if (Array.isArray(response.body.errors)) {
return response.body.errors.length === 0 ? null : response.body.errors
}
return response.body.errors === "" ? null : response.body.errors
}
export const getFirstResponseBodyError = (
response: HttpResponse<*>
): ?string => {
const errors = getResponseBodyErrors(response)
if (!Array.isArray(errors)) {
return errors
}
return errors.length === 0 ? null : errors[0]
}
export const getXhrResponseError = (response: Object): ?string => {
if (_.isString(response)) {
try {
response = JSON.parse(response)
} catch (e) {
return null
}
}
if (!_.isObject(response)) {
return null
}
if (_.isArray(response) && response.length > 0) {
return response[0]
}
if (response.errors && response.errors.length > 0) {
return response.errors[0]
}
if (response.error && response.error !== "") {
return response.error
}
return null
}
export const parsePrice = (priceStr: string | number): Decimal => {
let price
try {
price = new Decimal(priceStr)
} catch (e) {
return null
}
return price.toDecimalPlaces(2)
}
export const formatRunDateRange = (run: BootcampRun) =>
`${run.start_date ? formatReadableDateFromStr(run.start_date) : "TBD"} - ${
run.end_date ? formatReadableDateFromStr(run.end_date) : "TBD"
}`
export const recoverableErrorCode = (error: string) =>
error ? error.match(/(CS_101|CS_102)/g) : null
export const transformError = (error: string) =>
CS_ERROR_MESSAGES[recoverableErrorCode(error) || CS_DEFAULT]
export const isLocalStorageSupported = () => {
try {
const key = "__local_storage_access_key__"
window.localStorage.setItem(key, key)
window.localStorage.getItem(key)
return true
} catch (e) {
return false
}
}
export const createNovoEdLinkUrl = (baseUrl: string, stub: string): string => {
return `${baseUrl}/#!/courses/${stub}/home`
}
|
/*
|
random_line_split
|
util.js
|
// @flow
import Decimal from "decimal.js-light"
import * as R from "ramda"
import _ from "lodash"
import moment from "moment"
import { CS_DEFAULT, CS_ERROR_MESSAGES, ORDER_FULFILLED } from "../constants"
import type { BootcampRun } from "../flow/bootcampTypes"
import type { HttpResponse } from "../flow/httpTypes"
/**
* Creates a POST form with hidden input fields
* @param url the url for the form action
* @param payload Each key value pair will become an input field
*/
export function createForm(url: string, payload: Object): HTMLFormElement {
const form = document.createElement("form")
form.setAttribute("action", url)
form.setAttribute("method", "post")
form.setAttribute("class", "cybersource-payload")
for (const key: string of Object.keys(payload)) {
const value = payload[key]
const input = document.createElement("input")
input.setAttribute("name", key)
input.setAttribute("value", value)
input.setAttribute("type", "hidden")
form.appendChild(input)
}
return form
}
export const isNilOrBlank = R.either(R.isNil, R.isEmpty)
export const formatDollarAmount = (amount: ?number): string => {
amount = amount || 0
const formattedAmount = amount.toLocaleString("en-US", {
style: "currency",
currency: "USD",
minimumFractionDigits: 2,
maximumFractionDigits: 2
})
return formattedAmount.endsWith(".00") ?
formattedAmount.substring(0, formattedAmount.length - 3) :
formattedAmount
}
export const formatReadableDate = (datetime: moment$Moment): string =>
datetime.format("MMM D, YYYY")
export const formatReadableDateFromStr = (datetimeString: string): string =>
formatReadableDate(moment(datetimeString))
export const formatStartEndDateStrings = (
startDtString: ?string,
endDtString: ?string
): string => {
let formattedStart, formattedEnd
if (startDtString) {
formattedStart = formatReadableDateFromStr(startDtString)
}
if (endDtString) {
formattedEnd = formatReadableDateFromStr(endDtString)
}
if (!formattedStart && !formattedEnd) {
return ""
} else if (!formattedStart) {
// $FlowFixMe: This cannot be un-initialized
return `Ends ${formattedEnd}`
} else if (!formattedEnd) {
// $FlowFixMe: This cannot be un-initialized
return `Starts ${formattedStart}`
} else {
return `${formattedStart} - ${formattedEnd}`
}
}
export const getRunWithFulfilledOrder = (
runData: ?Array<Object>,
orderId: number
) =>
R.find(
bootcampRun =>
R.any(
payment =>
payment.order.status === ORDER_FULFILLED &&
payment.order.id === orderId,
bootcampRun.payments
),
runData
)
export const getInstallmentDeadlineDates = R.map(
R.compose(moment, R.prop("deadline"))
)
export function* incrementer(): Generator<number, *, *> {
let int = 1
// eslint-disable-next-line no-constant-condition
while (true) {
yield int++
}
}
export const formatTitle = (text: string) => `MIT Bootcamps | ${text}`
export const newSetWith = (set: Set<*>, valueToAdd: any): Set<*> => {
const newSet = new Set(set)
newSet.add(valueToAdd)
return newSet
}
export const newSetWithout = (set: Set<*>, valueToDelete: any): Set<*> => {
const newSet = new Set(set)
newSet.delete(valueToDelete)
return newSet
}
export const formatPrice = (price: ?string | number | Decimal): string => {
if (price === null || price === undefined) {
return ""
} else {
let decimalPrice: Decimal = Decimal(price).toDecimalPlaces(2)
let formattedPrice
const isNegative = decimalPrice.isNegative()
if (isNegative) {
decimalPrice = decimalPrice.times(-1)
}
if (decimalPrice.isInteger()) {
formattedPrice = decimalPrice.toFixed(0)
} else {
formattedPrice = decimalPrice.toFixed(2, Decimal.ROUND_HALF_UP)
}
return `${isNegative ? "-" : ""}$${formattedPrice}`
}
}
export const getFilenameFromPath = (url: string) =>
url.substring(url.lastIndexOf("/") + 1)
/*
* Our uploaded filenames begin with a media path. Until we start saving the
* raw file names for uploaded files, this utility function can be used to
* extract the file name.
* Ex: "media/1/abcde-12345_some_resume.pdf" -> "some_resume.pdf"
*/
export const getFilenameFromMediaPath = R.compose(
R.join("_"),
R.tail(),
R.split("_"),
R.defaultTo("")
)
export const isErrorStatusCode = (statusCode: number): boolean =>
statusCode >= 400
export const isErrorResponse = (response: HttpResponse<*>): boolean =>
isErrorStatusCode(response.status)
export const getResponseBodyErrors = (
response: HttpResponse<*>
): string | Array<string> | null => {
if (!response || !response.body || !response.body.errors) {
return null
}
if (Array.isArray(response.body.errors)) {
return response.body.errors.length === 0 ? null : response.body.errors
}
return response.body.errors === "" ? null : response.body.errors
}
export const getFirstResponseBodyError = (
response: HttpResponse<*>
): ?string => {
const errors = getResponseBodyErrors(response)
if (!Array.isArray(errors)) {
return errors
}
return errors.length === 0 ? null : errors[0]
}
export const getXhrResponseError = (response: Object): ?string => {
if (_.isString(response)) {
try {
response = JSON.parse(response)
} catch (e) {
return null
}
}
if (!_.isObject(response))
|
if (_.isArray(response) && response.length > 0) {
return response[0]
}
if (response.errors && response.errors.length > 0) {
return response.errors[0]
}
if (response.error && response.error !== "") {
return response.error
}
return null
}
export const parsePrice = (priceStr: string | number): Decimal => {
let price
try {
price = new Decimal(priceStr)
} catch (e) {
return null
}
return price.toDecimalPlaces(2)
}
export const formatRunDateRange = (run: BootcampRun) =>
`${run.start_date ? formatReadableDateFromStr(run.start_date) : "TBD"} - ${
run.end_date ? formatReadableDateFromStr(run.end_date) : "TBD"
}`
export const recoverableErrorCode = (error: string) =>
error ? error.match(/(CS_101|CS_102)/g) : null
export const transformError = (error: string) =>
CS_ERROR_MESSAGES[recoverableErrorCode(error) || CS_DEFAULT]
export const isLocalStorageSupported = () => {
try {
const key = "__local_storage_access_key__"
window.localStorage.setItem(key, key)
window.localStorage.getItem(key)
return true
} catch (e) {
return false
}
}
export const createNovoEdLinkUrl = (baseUrl: string, stub: string): string => {
return `${baseUrl}/#!/courses/${stub}/home`
}
|
{
return null
}
|
conditional_block
|
index.js
|
'use strict';
require('nightingale-app-console');
var _pool = require('koack/pool');
var _pool2 = _interopRequireDefault(_pool);
var _server = require('koack/server');
var _server2 = _interopRequireDefault(_server);
var _memory = require('koack/storages/memory');
var _memory2 = _interopRequireDefault(_memory);
var _interactiveMessages = require('koack/interactive-messages');
var _interactiveMessages2 = _interopRequireDefault(_interactiveMessages);
var _config = require('../config');
var _config2 = _interopRequireDefault(_config);
function
|
(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
const pool = new _pool2.default({
size: 100,
path: require.resolve('./bot')
});
const server = new _server2.default({
pool,
scopes: ['bot'],
slackClient: _config2.default.slackClient,
storage: (0, _memory2.default)()
});
server.proxy = true;
server.use((0, _interactiveMessages2.default)({
pool,
token: _config2.default.verificationToken
}));
server.listen({ port: process.env.PORT || 3000 });
//# sourceMappingURL=index.js.map
|
_interopRequireDefault
|
identifier_name
|
index.js
|
'use strict';
require('nightingale-app-console');
var _pool = require('koack/pool');
var _pool2 = _interopRequireDefault(_pool);
var _server = require('koack/server');
var _server2 = _interopRequireDefault(_server);
var _memory = require('koack/storages/memory');
var _memory2 = _interopRequireDefault(_memory);
var _interactiveMessages = require('koack/interactive-messages');
var _interactiveMessages2 = _interopRequireDefault(_interactiveMessages);
var _config = require('../config');
var _config2 = _interopRequireDefault(_config);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
const pool = new _pool2.default({
size: 100,
path: require.resolve('./bot')
});
const server = new _server2.default({
pool,
scopes: ['bot'],
slackClient: _config2.default.slackClient,
storage: (0, _memory2.default)()
});
server.proxy = true;
server.use((0, _interactiveMessages2.default)({
pool,
token: _config2.default.verificationToken
}));
|
server.listen({ port: process.env.PORT || 3000 });
//# sourceMappingURL=index.js.map
|
random_line_split
|
|
index.js
|
'use strict';
require('nightingale-app-console');
var _pool = require('koack/pool');
var _pool2 = _interopRequireDefault(_pool);
var _server = require('koack/server');
var _server2 = _interopRequireDefault(_server);
var _memory = require('koack/storages/memory');
var _memory2 = _interopRequireDefault(_memory);
var _interactiveMessages = require('koack/interactive-messages');
var _interactiveMessages2 = _interopRequireDefault(_interactiveMessages);
var _config = require('../config');
var _config2 = _interopRequireDefault(_config);
function _interopRequireDefault(obj)
|
const pool = new _pool2.default({
size: 100,
path: require.resolve('./bot')
});
const server = new _server2.default({
pool,
scopes: ['bot'],
slackClient: _config2.default.slackClient,
storage: (0, _memory2.default)()
});
server.proxy = true;
server.use((0, _interactiveMessages2.default)({
pool,
token: _config2.default.verificationToken
}));
server.listen({ port: process.env.PORT || 3000 });
//# sourceMappingURL=index.js.map
|
{ return obj && obj.__esModule ? obj : { default: obj }; }
|
identifier_body
|
memory.ts
|
import { union } from 'lodash';
import matchesClientOption from '../utils/memoryModels/matchesClientOption';
import FacadeConfig from '../utils/memoryModels/FacadeConfig';
import Signature, { Opts } from './Signature';
export default (config: FacadeConfig): Signature => {
return async ({
id,
client,
agents,
relatedAgents,
verbs,
activities,
relatedActivities,
registrations
}) => {
config.state.statements = config.state.statements.map((model) => {
if (model.statement.id === id && matchesClientOption(model, client)) {
return {
...model,
|
activities: union(activities, model.activities),
relatedActivities: union(relatedActivities, model.relatedActivities),
registrations: union(registrations, model.registrations),
};
}
return model;
});
};
};
|
agents: union(agents, model.agents),
relatedAgents: union(relatedAgents, model.relatedAgents),
verbs: union(verbs, model.verbs),
|
random_line_split
|
memory.ts
|
import { union } from 'lodash';
import matchesClientOption from '../utils/memoryModels/matchesClientOption';
import FacadeConfig from '../utils/memoryModels/FacadeConfig';
import Signature, { Opts } from './Signature';
export default (config: FacadeConfig): Signature => {
return async ({
id,
client,
agents,
relatedAgents,
verbs,
activities,
relatedActivities,
registrations
}) => {
config.state.statements = config.state.statements.map((model) => {
if (model.statement.id === id && matchesClientOption(model, client))
|
return model;
});
};
};
|
{
return {
...model,
agents: union(agents, model.agents),
relatedAgents: union(relatedAgents, model.relatedAgents),
verbs: union(verbs, model.verbs),
activities: union(activities, model.activities),
relatedActivities: union(relatedActivities, model.relatedActivities),
registrations: union(registrations, model.registrations),
};
}
|
conditional_block
|
Featured.js
|
var
// get util library
util = require("core/Util");
function Featured(options){
var self = Ti.UI.createTableView({
width : Ti.UI.FILL,
backgroundColor : Theme.Home.Featured.HeaderBackgroundColor,
height : options.height || null
});
return {
get : function(){
return self;
},
/*
* Featured row factory method
*
* @param {String} name: the product name to display
* @param {String} image: the icon image to display
* @param {String} desc: description of item to display in row
* @param {String} itemId: item id used to load product page
*/
createRow : function(name, image, desc, itemId){
var row = Ti.UI.createTableViewRow({
className : "featured_rows",
backgroundColor : Theme.Home.Featured.RowsBackgroundColor,
selectedBackgroundColor : Theme.Home.Featured.SelectedBackgroundColor,
hasChild : true
}),
img = Ti.UI.createImageView({
image : image,
left : 1,
top : 1,
borderWidth : 3,
borderColor : Theme.Home.Featured.ImageBorderColor,
defaultImage : Config.PRODUCTS_DEFAULT_THUMB_IMAGE
}),
bodyView = Ti.UI.createView({
layout : "vertical"
}),
title = Ti.UI.createLabel({
text : name,
minimumFontSize : 12,
color : Theme.Home.Featured.TitleColor,
height : Ti.UI.SIZE,
left : 2,
top : 4,
font : {
|
body = Ti.UI.createLabel({
text : desc,
height : Ti.UI.SIZE,
left : 2,
top : 2,
color : Theme.Home.Featured.DescriptionColor,
font : {
fontSize : Theme.Home.Featured.DescriptionFontSize,
fontWeight : Theme.Home.Featured.DescriptionFontWeight
}
});
// assemble row
bodyView.add(title);
bodyView.add(body);
row.add(img);
if(util.osname==="android"){
img.width = Theme.Home.Featured.ImageWidth + "dip";
img.height = Theme.Home.Featured.ImageHeight + "dip";
bodyView.left = (Theme.Home.Featured.ImageWidth + 1) + "dip";
bodyView.right = "3dip";
bodyView.top = 0;
bodyView.bottom = 2;
body.height = Ti.UI.SIZE;
}
else{
img.width = Theme.Home.Featured.ImageWidth;
bodyView.left = Theme.Home.Featured.ImageWidth + 1;
bodyView.height = Ti.UI.SIZE;
}
row.add(bodyView);
// handle featured item click event
row.addEventListener(
"click",
function(e){
Ti.App.fireEvent(
"APP:SHOW_PRODUCT",
{ "itemId" : itemId, "tab" : "Home" }
);
}
);
return row;
}
}
}
exports.create = function(options){
return Featured(options);
};
|
fontSize : Theme.Home.Featured.TitleFontSize,
fontWeight : Theme.Home.Featured.TitleFontWeight
}
}),
|
random_line_split
|
Featured.js
|
var
// get util library
util = require("core/Util");
function Featured(options){
var self = Ti.UI.createTableView({
width : Ti.UI.FILL,
backgroundColor : Theme.Home.Featured.HeaderBackgroundColor,
height : options.height || null
});
return {
get : function(){
return self;
},
/*
* Featured row factory method
*
* @param {String} name: the product name to display
* @param {String} image: the icon image to display
* @param {String} desc: description of item to display in row
* @param {String} itemId: item id used to load product page
*/
createRow : function(name, image, desc, itemId){
var row = Ti.UI.createTableViewRow({
className : "featured_rows",
backgroundColor : Theme.Home.Featured.RowsBackgroundColor,
selectedBackgroundColor : Theme.Home.Featured.SelectedBackgroundColor,
hasChild : true
}),
img = Ti.UI.createImageView({
image : image,
left : 1,
top : 1,
borderWidth : 3,
borderColor : Theme.Home.Featured.ImageBorderColor,
defaultImage : Config.PRODUCTS_DEFAULT_THUMB_IMAGE
}),
bodyView = Ti.UI.createView({
layout : "vertical"
}),
title = Ti.UI.createLabel({
text : name,
minimumFontSize : 12,
color : Theme.Home.Featured.TitleColor,
height : Ti.UI.SIZE,
left : 2,
top : 4,
font : {
fontSize : Theme.Home.Featured.TitleFontSize,
fontWeight : Theme.Home.Featured.TitleFontWeight
}
}),
body = Ti.UI.createLabel({
text : desc,
height : Ti.UI.SIZE,
left : 2,
top : 2,
color : Theme.Home.Featured.DescriptionColor,
font : {
fontSize : Theme.Home.Featured.DescriptionFontSize,
fontWeight : Theme.Home.Featured.DescriptionFontWeight
}
});
// assemble row
bodyView.add(title);
bodyView.add(body);
row.add(img);
if(util.osname==="android"){
img.width = Theme.Home.Featured.ImageWidth + "dip";
img.height = Theme.Home.Featured.ImageHeight + "dip";
bodyView.left = (Theme.Home.Featured.ImageWidth + 1) + "dip";
bodyView.right = "3dip";
bodyView.top = 0;
bodyView.bottom = 2;
body.height = Ti.UI.SIZE;
}
else
|
row.add(bodyView);
// handle featured item click event
row.addEventListener(
"click",
function(e){
Ti.App.fireEvent(
"APP:SHOW_PRODUCT",
{ "itemId" : itemId, "tab" : "Home" }
);
}
);
return row;
}
}
}
exports.create = function(options){
return Featured(options);
};
|
{
img.width = Theme.Home.Featured.ImageWidth;
bodyView.left = Theme.Home.Featured.ImageWidth + 1;
bodyView.height = Ti.UI.SIZE;
}
|
conditional_block
|
Featured.js
|
var
// get util library
util = require("core/Util");
function Featured(options)
|
exports.create = function(options){
return Featured(options);
};
|
{
var self = Ti.UI.createTableView({
width : Ti.UI.FILL,
backgroundColor : Theme.Home.Featured.HeaderBackgroundColor,
height : options.height || null
});
return {
get : function(){
return self;
},
/*
* Featured row factory method
*
* @param {String} name: the product name to display
* @param {String} image: the icon image to display
* @param {String} desc: description of item to display in row
* @param {String} itemId: item id used to load product page
*/
createRow : function(name, image, desc, itemId){
var row = Ti.UI.createTableViewRow({
className : "featured_rows",
backgroundColor : Theme.Home.Featured.RowsBackgroundColor,
selectedBackgroundColor : Theme.Home.Featured.SelectedBackgroundColor,
hasChild : true
}),
img = Ti.UI.createImageView({
image : image,
left : 1,
top : 1,
borderWidth : 3,
borderColor : Theme.Home.Featured.ImageBorderColor,
defaultImage : Config.PRODUCTS_DEFAULT_THUMB_IMAGE
}),
bodyView = Ti.UI.createView({
layout : "vertical"
}),
title = Ti.UI.createLabel({
text : name,
minimumFontSize : 12,
color : Theme.Home.Featured.TitleColor,
height : Ti.UI.SIZE,
left : 2,
top : 4,
font : {
fontSize : Theme.Home.Featured.TitleFontSize,
fontWeight : Theme.Home.Featured.TitleFontWeight
}
}),
body = Ti.UI.createLabel({
text : desc,
height : Ti.UI.SIZE,
left : 2,
top : 2,
color : Theme.Home.Featured.DescriptionColor,
font : {
fontSize : Theme.Home.Featured.DescriptionFontSize,
fontWeight : Theme.Home.Featured.DescriptionFontWeight
}
});
// assemble row
bodyView.add(title);
bodyView.add(body);
row.add(img);
if(util.osname==="android"){
img.width = Theme.Home.Featured.ImageWidth + "dip";
img.height = Theme.Home.Featured.ImageHeight + "dip";
bodyView.left = (Theme.Home.Featured.ImageWidth + 1) + "dip";
bodyView.right = "3dip";
bodyView.top = 0;
bodyView.bottom = 2;
body.height = Ti.UI.SIZE;
}
else{
img.width = Theme.Home.Featured.ImageWidth;
bodyView.left = Theme.Home.Featured.ImageWidth + 1;
bodyView.height = Ti.UI.SIZE;
}
row.add(bodyView);
// handle featured item click event
row.addEventListener(
"click",
function(e){
Ti.App.fireEvent(
"APP:SHOW_PRODUCT",
{ "itemId" : itemId, "tab" : "Home" }
);
}
);
return row;
}
}
}
|
identifier_body
|
Featured.js
|
var
// get util library
util = require("core/Util");
function
|
(options){
var self = Ti.UI.createTableView({
width : Ti.UI.FILL,
backgroundColor : Theme.Home.Featured.HeaderBackgroundColor,
height : options.height || null
});
return {
get : function(){
return self;
},
/*
* Featured row factory method
*
* @param {String} name: the product name to display
* @param {String} image: the icon image to display
* @param {String} desc: description of item to display in row
* @param {String} itemId: item id used to load product page
*/
createRow : function(name, image, desc, itemId){
var row = Ti.UI.createTableViewRow({
className : "featured_rows",
backgroundColor : Theme.Home.Featured.RowsBackgroundColor,
selectedBackgroundColor : Theme.Home.Featured.SelectedBackgroundColor,
hasChild : true
}),
img = Ti.UI.createImageView({
image : image,
left : 1,
top : 1,
borderWidth : 3,
borderColor : Theme.Home.Featured.ImageBorderColor,
defaultImage : Config.PRODUCTS_DEFAULT_THUMB_IMAGE
}),
bodyView = Ti.UI.createView({
layout : "vertical"
}),
title = Ti.UI.createLabel({
text : name,
minimumFontSize : 12,
color : Theme.Home.Featured.TitleColor,
height : Ti.UI.SIZE,
left : 2,
top : 4,
font : {
fontSize : Theme.Home.Featured.TitleFontSize,
fontWeight : Theme.Home.Featured.TitleFontWeight
}
}),
body = Ti.UI.createLabel({
text : desc,
height : Ti.UI.SIZE,
left : 2,
top : 2,
color : Theme.Home.Featured.DescriptionColor,
font : {
fontSize : Theme.Home.Featured.DescriptionFontSize,
fontWeight : Theme.Home.Featured.DescriptionFontWeight
}
});
// assemble row
bodyView.add(title);
bodyView.add(body);
row.add(img);
if(util.osname==="android"){
img.width = Theme.Home.Featured.ImageWidth + "dip";
img.height = Theme.Home.Featured.ImageHeight + "dip";
bodyView.left = (Theme.Home.Featured.ImageWidth + 1) + "dip";
bodyView.right = "3dip";
bodyView.top = 0;
bodyView.bottom = 2;
body.height = Ti.UI.SIZE;
}
else{
img.width = Theme.Home.Featured.ImageWidth;
bodyView.left = Theme.Home.Featured.ImageWidth + 1;
bodyView.height = Ti.UI.SIZE;
}
row.add(bodyView);
// handle featured item click event
row.addEventListener(
"click",
function(e){
Ti.App.fireEvent(
"APP:SHOW_PRODUCT",
{ "itemId" : itemId, "tab" : "Home" }
);
}
);
return row;
}
}
}
exports.create = function(options){
return Featured(options);
};
|
Featured
|
identifier_name
|
module.js
|
define(['angular','search/module-name','kylo-utils/LazyLoadUtil','constants/AccessConstants', 'kylo-services','kylo-feedmgr'], function (angular,moduleName,lazyLoadUtil,AccessConstants) {
var module = angular.module(moduleName, []);
module.config(['$stateProvider',function ($stateProvider) {
$stateProvider.state(AccessConstants.UI_STATES.SEARCH.state,{
url:'/search',
params: {
},
views: {
'content': {
templateUrl: 'js/search/common/search.html',
controller: "SearchController",
controllerAs: "vm"
}
},
resolve: {
loadMyCtrl: lazyLoadController(['search/common/SearchController'])
},
data:{
breadcrumbRoot:false,
displayName:'Search',
module:moduleName,
permissions:AccessConstants.UI_STATES.SEARCH.permissions
}
})
function lazyLoadController(path)
|
}]);
return module;
});
|
{
return lazyLoadUtil.lazyLoadController(path,'search/module-require');
}
|
identifier_body
|
module.js
|
define(['angular','search/module-name','kylo-utils/LazyLoadUtil','constants/AccessConstants', 'kylo-services','kylo-feedmgr'], function (angular,moduleName,lazyLoadUtil,AccessConstants) {
var module = angular.module(moduleName, []);
module.config(['$stateProvider',function ($stateProvider) {
$stateProvider.state(AccessConstants.UI_STATES.SEARCH.state,{
url:'/search',
params: {
},
views: {
'content': {
templateUrl: 'js/search/common/search.html',
controller: "SearchController",
controllerAs: "vm"
}
},
resolve: {
loadMyCtrl: lazyLoadController(['search/common/SearchController'])
},
data:{
breadcrumbRoot:false,
displayName:'Search',
module:moduleName,
permissions:AccessConstants.UI_STATES.SEARCH.permissions
}
})
function
|
(path){
return lazyLoadUtil.lazyLoadController(path,'search/module-require');
}
}]);
return module;
});
|
lazyLoadController
|
identifier_name
|
module.js
|
define(['angular','search/module-name','kylo-utils/LazyLoadUtil','constants/AccessConstants', 'kylo-services','kylo-feedmgr'], function (angular,moduleName,lazyLoadUtil,AccessConstants) {
var module = angular.module(moduleName, []);
module.config(['$stateProvider',function ($stateProvider) {
$stateProvider.state(AccessConstants.UI_STATES.SEARCH.state,{
url:'/search',
params: {
},
views: {
'content': {
templateUrl: 'js/search/common/search.html',
controller: "SearchController",
controllerAs: "vm"
}
},
resolve: {
loadMyCtrl: lazyLoadController(['search/common/SearchController'])
|
breadcrumbRoot:false,
displayName:'Search',
module:moduleName,
permissions:AccessConstants.UI_STATES.SEARCH.permissions
}
})
function lazyLoadController(path){
return lazyLoadUtil.lazyLoadController(path,'search/module-require');
}
}]);
return module;
});
|
},
data:{
|
random_line_split
|
mod.rs
|
//! Module containing various utility functions.
mod os;
mod webdav;
mod content_encoding;
use base64;
use std::path::Path;
use percent_encoding;
use walkdir::WalkDir;
use std::borrow::Cow;
use rfsapi::RawFileData;
use std::{cmp, f64, str};
use std::time::SystemTime;
use std::collections::HashMap;
use time::{self, Duration, Tm};
use iron::{mime, Headers, Url};
use base64::display::Base64Display;
use std::fmt::{self, Write as FmtWrite};
use iron::error::HttpResult as HyperResult;
use std::fs::{self, FileType, Metadata, File};
use iron::headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
*self
}
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(time: SystemTime) -> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree .
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image, ..)) |
Some(mime::Mime(mime::TopLevel::Video, ..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text, ..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application, ..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else {
|
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
}
for entry in WalkDir::new(&from).min_depth(1).into_iter().flatten() {
let source_metadata = match entry.metadata() {
Ok(md) => md,
Err(err) => {
errors.push((err.into(), entry.path().to_string_lossy().into_owned()));
continue;
}
};
let relative_path = entry.path().strip_prefix(&from).expect("strip_prefix failed; this is a probably a bug in copy_dir");
let target_path = to.join(relative_path);
if !is_actually_file(&source_metadata.file_type(), entry.path()) {
push_error!(errors, relative_path, fs::create_dir(&target_path));
push_error!(errors, relative_path, fs::set_permissions(&target_path, source_metadata.permissions()));
} else {
push_error!(errors, relative_path, fs::copy(entry.path(), &target_path));
}
}
Ok(errors)
}
|
""
}
|
random_line_split
|
mod.rs
|
//! Module containing various utility functions.
mod os;
mod webdav;
mod content_encoding;
use base64;
use std::path::Path;
use percent_encoding;
use walkdir::WalkDir;
use std::borrow::Cow;
use rfsapi::RawFileData;
use std::{cmp, f64, str};
use std::time::SystemTime;
use std::collections::HashMap;
use time::{self, Duration, Tm};
use iron::{mime, Headers, Url};
use base64::display::Base64Display;
use std::fmt::{self, Write as FmtWrite};
use iron::error::HttpResult as HyperResult;
use std::fs::{self, FileType, Metadata, File};
use iron::headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n>
|
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(time: SystemTime) -> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree .
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image, ..)) |
Some(mime::Mime(mime::TopLevel::Video, ..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text, ..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application, ..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else {
""
}
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
}
for entry in WalkDir::new(&from).min_depth(1).into_iter().flatten() {
let source_metadata = match entry.metadata() {
Ok(md) => md,
Err(err) => {
errors.push((err.into(), entry.path().to_string_lossy().into_owned()));
continue;
}
};
let relative_path = entry.path().strip_prefix(&from).expect("strip_prefix failed; this is a probably a bug in copy_dir");
let target_path = to.join(relative_path);
if !is_actually_file(&source_metadata.file_type(), entry.path()) {
push_error!(errors, relative_path, fs::create_dir(&target_path));
push_error!(errors, relative_path, fs::set_permissions(&target_path, source_metadata.permissions()));
} else {
push_error!(errors, relative_path, fs::copy(entry.path(), &target_path));
}
}
Ok(errors)
}
|
{
*self
}
|
identifier_body
|
mod.rs
|
//! Module containing various utility functions.
mod os;
mod webdav;
mod content_encoding;
use base64;
use std::path::Path;
use percent_encoding;
use walkdir::WalkDir;
use std::borrow::Cow;
use rfsapi::RawFileData;
use std::{cmp, f64, str};
use std::time::SystemTime;
use std::collections::HashMap;
use time::{self, Duration, Tm};
use iron::{mime, Headers, Url};
use base64::display::Base64Display;
use std::fmt::{self, Write as FmtWrite};
use iron::error::HttpResult as HyperResult;
use std::fs::{self, FileType, Metadata, File};
use iron::headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
*self
}
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(time: SystemTime) -> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree .
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return f
|
f_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image, ..)) |
Some(mime::Mime(mime::TopLevel::Video, ..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text, ..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application, ..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else {
""
}
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
}
for entry in WalkDir::new(&from).min_depth(1).into_iter().flatten() {
let source_metadata = match entry.metadata() {
Ok(md) => md,
Err(err) => {
errors.push((err.into(), entry.path().to_string_lossy().into_owned()));
continue;
}
};
let relative_path = entry.path().strip_prefix(&from).expect("strip_prefix failed; this is a probably a bug in copy_dir");
let target_path = to.join(relative_path);
if !is_actually_file(&source_metadata.file_type(), entry.path()) {
push_error!(errors, relative_path, fs::create_dir(&target_path));
push_error!(errors, relative_path, fs::set_permissions(&target_path, source_metadata.permissions()));
} else {
push_error!(errors, relative_path, fs::copy(entry.path(), &target_path));
}
}
Ok(errors)
}
|
alse;
};
if who == o
|
conditional_block
|
mod.rs
|
//! Module containing various utility functions.
mod os;
mod webdav;
mod content_encoding;
use base64;
use std::path::Path;
use percent_encoding;
use walkdir::WalkDir;
use std::borrow::Cow;
use rfsapi::RawFileData;
use std::{cmp, f64, str};
use std::time::SystemTime;
use std::collections::HashMap;
use time::{self, Duration, Tm};
use iron::{mime, Headers, Url};
use base64::display::Base64Display;
use std::fmt::{self, Write as FmtWrite};
use iron::error::HttpResult as HyperResult;
use std::fs::{self, FileType, Metadata, File};
use iron::headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
*self
}
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(tim
|
-> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree .
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image, ..)) |
Some(mime::Mime(mime::TopLevel::Video, ..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text, ..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application, ..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else {
""
}
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
}
for entry in WalkDir::new(&from).min_depth(1).into_iter().flatten() {
let source_metadata = match entry.metadata() {
Ok(md) => md,
Err(err) => {
errors.push((err.into(), entry.path().to_string_lossy().into_owned()));
continue;
}
};
let relative_path = entry.path().strip_prefix(&from).expect("strip_prefix failed; this is a probably a bug in copy_dir");
let target_path = to.join(relative_path);
if !is_actually_file(&source_metadata.file_type(), entry.path()) {
push_error!(errors, relative_path, fs::create_dir(&target_path));
push_error!(errors, relative_path, fs::set_permissions(&target_path, source_metadata.permissions()));
} else {
push_error!(errors, relative_path, fs::copy(entry.path(), &target_path));
}
}
Ok(errors)
}
|
e: SystemTime)
|
identifier_name
|
categoriesFormView.js
|
define([
'jquery',
'underscore',
'backbone',
'mustache',
'initView',
'text!templates/categories/categoriesItem.mustache',
'text!templates/categories/categoriesForm.mustache',
'categoryModel',
'categoryCollection',
'storage'
],
function(
$,
_,
Backbone,
Mustache,
InitView,
CategoriesItemTemplate,
CategoriesFormTemplate,
Category,
CategoryCollection,
storage) {
var CategoryFormView = Backbone.View.extend({
el: $("#content"),
displayForm: function(categorie){
var categories_actives = storage.categories.enable();
var template = Mustache.render(CategoriesFormTemplate, {
'categorie': categorie,
'categories': new CategoryCollection(categories_actives).toJSON()
});
$("#content").html(template);
var defaultIcon = "fa-circle-o";
// Put select markup as selected
if (categorie) {
$("#cat_form select[name='parent']").find('option[value="' + categorie.parent_id + '"]').attr('selected', true);
defaultIcon = categorie.icon;
}
$('#cat_form .iconpicker').iconpicker({
arrowClass: 'btn-primary',
arrowPrevIconClass: 'glyphicon glyphicon-chevron-left',
arrowNextIconClass: 'glyphicon glyphicon-chevron-right',
cols: 5,
icon: defaultIcon,
iconset: 'fontawesome',
labelHeader: '{0} of {1} pages',
labelFooter: '{0} - {1} of {2} icons',
placement: 'bottom',
rows: 5,
search: true,
searchText: 'Search',
selectedClass: 'btn-primary',
unselectedClass: ''
});
$('#cat_form .colorpicker').colorpicker();
// User validate form
$("#cat_form").on("submit", function() {
var array = $("#cat_form").serializeArray();
var dict = {};
for (var i = 0; i < array.length; i++) {
dict[array[i]['name']] = array[i]['value']
}
dict['user'] = storage.user.get('id');
if (dict.parent && dict.parent !== "") {
dict.parent = storage.categories.get(dict.parent).url();
}
var category = new Category(dict);
Backbone.Validation.bind(this, {
model: category,
valid: function(view, attr) {
// Check if all are required
$(view).find('input[name=' + attr + '], select[name=' + attr + ']')
.parent()
.removeClass('has-error')
.addClass('has-success')
.prev().html('');
},
invalid: function(view, attr, error) {
$(view).find('input[name=' + attr + '], select[name=' + attr + ']')
.parent()
.addClass('has-error')
.removeClass('has-success')
.prev().html(error);
}
});
category.validate();
if (category.isValid()) {
category.save(dict, {
wait: true,
success: function(model, response) {
storage.categories.fetch({
success: function(){
Backbone.history.navigate("#/categories", {
trigger: true
});
}
});
},
error: function(model, error) {
console.log(model.toJSON());
console.log('error.responseText');
}
});
}
return false;
});
},
render: function(categorie_id) {
var initView = new InitView();
if (initView.isLoaded() === false) {
initView.render();
}
initView.changeSelectedItem("nav_categories");
var view = this;
require(['bootstrap-iconpicker', 'bootstrap-colorpicker'], function() {
if(categorie_id){
var categorie = new Category({id: categorie_id});
categorie.fetch({
success: function (c) {
view.displayForm(categorie.toJSON());
}
});
}else
|
});
}
});
return CategoryFormView;
});
|
{
view.displayForm();
}
|
conditional_block
|
categoriesFormView.js
|
define([
'jquery',
'underscore',
'backbone',
'mustache',
'initView',
'text!templates/categories/categoriesItem.mustache',
'text!templates/categories/categoriesForm.mustache',
'categoryModel',
'categoryCollection',
'storage'
],
function(
$,
_,
Backbone,
Mustache,
|
InitView,
CategoriesItemTemplate,
CategoriesFormTemplate,
Category,
CategoryCollection,
storage) {
var CategoryFormView = Backbone.View.extend({
el: $("#content"),
displayForm: function(categorie){
var categories_actives = storage.categories.enable();
var template = Mustache.render(CategoriesFormTemplate, {
'categorie': categorie,
'categories': new CategoryCollection(categories_actives).toJSON()
});
$("#content").html(template);
var defaultIcon = "fa-circle-o";
// Put select markup as selected
if (categorie) {
$("#cat_form select[name='parent']").find('option[value="' + categorie.parent_id + '"]').attr('selected', true);
defaultIcon = categorie.icon;
}
$('#cat_form .iconpicker').iconpicker({
arrowClass: 'btn-primary',
arrowPrevIconClass: 'glyphicon glyphicon-chevron-left',
arrowNextIconClass: 'glyphicon glyphicon-chevron-right',
cols: 5,
icon: defaultIcon,
iconset: 'fontawesome',
labelHeader: '{0} of {1} pages',
labelFooter: '{0} - {1} of {2} icons',
placement: 'bottom',
rows: 5,
search: true,
searchText: 'Search',
selectedClass: 'btn-primary',
unselectedClass: ''
});
$('#cat_form .colorpicker').colorpicker();
// User validate form
$("#cat_form").on("submit", function() {
var array = $("#cat_form").serializeArray();
var dict = {};
for (var i = 0; i < array.length; i++) {
dict[array[i]['name']] = array[i]['value']
}
dict['user'] = storage.user.get('id');
if (dict.parent && dict.parent !== "") {
dict.parent = storage.categories.get(dict.parent).url();
}
var category = new Category(dict);
Backbone.Validation.bind(this, {
model: category,
valid: function(view, attr) {
// Check if all are required
$(view).find('input[name=' + attr + '], select[name=' + attr + ']')
.parent()
.removeClass('has-error')
.addClass('has-success')
.prev().html('');
},
invalid: function(view, attr, error) {
$(view).find('input[name=' + attr + '], select[name=' + attr + ']')
.parent()
.addClass('has-error')
.removeClass('has-success')
.prev().html(error);
}
});
category.validate();
if (category.isValid()) {
category.save(dict, {
wait: true,
success: function(model, response) {
storage.categories.fetch({
success: function(){
Backbone.history.navigate("#/categories", {
trigger: true
});
}
});
},
error: function(model, error) {
console.log(model.toJSON());
console.log('error.responseText');
}
});
}
return false;
});
},
render: function(categorie_id) {
var initView = new InitView();
if (initView.isLoaded() === false) {
initView.render();
}
initView.changeSelectedItem("nav_categories");
var view = this;
require(['bootstrap-iconpicker', 'bootstrap-colorpicker'], function() {
if(categorie_id){
var categorie = new Category({id: categorie_id});
categorie.fetch({
success: function (c) {
view.displayForm(categorie.toJSON());
}
});
}else{
view.displayForm();
}
});
}
});
return CategoryFormView;
});
|
random_line_split
|
|
record-pat.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct
|
{x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(in: t3) -> int {
match in {
c(T2 {x: a(m), _}, _) => { return m; }
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
|
T2
|
identifier_name
|
record-pat.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(in: t3) -> int {
match in {
c(T2 {x: a(m), _}, _) =>
|
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
|
{ return m; }
|
conditional_block
|
record-pat.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(in: t3) -> int {
match in {
c(T2 {x: a(m), _}, _) => { return m; }
|
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
|
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
|
random_line_split
|
length_limit.rs
|
//! See [`HtmlWithLimit`].
use std::fmt::Write;
use std::ops::ControlFlow;
use crate::html::escape::Escape;
/// A buffer that allows generating HTML with a length limit.
///
/// This buffer ensures that:
///
/// * all tags are closed,
/// * tags are closed in the reverse order of when they were opened (i.e., the correct HTML order),
/// * no tags are left empty (e.g., `<em></em>`) due to the length limit being reached,
/// * all text is escaped.
#[derive(Debug)]
pub(super) struct HtmlWithLimit {
buf: String,
len: usize,
limit: usize,
/// A list of tags that have been requested to be opened via [`Self::open_tag()`]
/// but have not actually been pushed to `buf` yet. This ensures that tags are not
/// left empty (e.g., `<em></em>`) due to the length limit being reached.
queued_tags: Vec<&'static str>,
/// A list of all tags that have been opened but not yet closed.
unclosed_tags: Vec<&'static str>,
}
impl HtmlWithLimit {
/// Create a new buffer, with a limit of `length_limit`.
pub(super) fn new(length_limit: usize) -> Self {
let buf = if length_limit > 1000 {
// If the length limit is really large, don't preallocate tons of memory.
String::new()
} else {
// The length limit is actually a good heuristic for initial allocation size.
// Measurements showed that using it as the initial capacity ended up using less memory
// than `String::new`.
// See https://github.com/rust-lang/rust/pull/88173#discussion_r692531631 for more.
String::with_capacity(length_limit)
};
Self {
buf,
len: 0,
limit: length_limit,
unclosed_tags: Vec::new(),
queued_tags: Vec::new(),
}
}
/// Finish using the buffer and get the written output.
/// This function will close all unclosed tags for you.
pub(super) fn finish(mut self) -> String {
self.close_all_tags();
self.buf
}
/// Write some plain text to the buffer, escaping as needed.
///
/// This function skips writing the text if the length limit was reached
/// and returns [`ControlFlow::Break`].
pub(super) fn push(&mut self, text: &str) -> ControlFlow<(), ()>
|
/// Open an HTML tag.
///
/// **Note:** HTML attributes have not yet been implemented.
/// This function will panic if called with a non-alphabetic `tag_name`.
pub(super) fn open_tag(&mut self, tag_name: &'static str) {
assert!(
tag_name.chars().all(|c| ('a'..='z').contains(&c)),
"tag_name contained non-alphabetic chars: {:?}",
tag_name
);
self.queued_tags.push(tag_name);
}
/// Close the most recently opened HTML tag.
pub(super) fn close_tag(&mut self) {
match self.unclosed_tags.pop() {
// Close the most recently opened tag.
Some(tag_name) => write!(self.buf, "</{}>", tag_name).unwrap(),
// There are valid cases where `close_tag()` is called without
// there being any tags to close. For example, this occurs when
// a tag is opened after the length limit is exceeded;
// `flush_queue()` will never be called, and thus, the tag will
// not end up being added to `unclosed_tags`.
None => {}
}
}
/// Write all queued tags and add them to the `unclosed_tags` list.
fn flush_queue(&mut self) {
for tag_name in self.queued_tags.drain(..) {
write!(self.buf, "<{}>", tag_name).unwrap();
self.unclosed_tags.push(tag_name);
}
}
/// Close all unclosed tags.
fn close_all_tags(&mut self) {
while !self.unclosed_tags.is_empty() {
self.close_tag();
}
}
}
#[cfg(test)]
mod tests;
|
{
if self.len + text.len() > self.limit {
return ControlFlow::BREAK;
}
self.flush_queue();
write!(self.buf, "{}", Escape(text)).unwrap();
self.len += text.len();
ControlFlow::CONTINUE
}
|
identifier_body
|
length_limit.rs
|
//! See [`HtmlWithLimit`].
use std::fmt::Write;
use std::ops::ControlFlow;
use crate::html::escape::Escape;
/// A buffer that allows generating HTML with a length limit.
///
/// This buffer ensures that:
///
/// * all tags are closed,
/// * tags are closed in the reverse order of when they were opened (i.e., the correct HTML order),
/// * no tags are left empty (e.g., `<em></em>`) due to the length limit being reached,
/// * all text is escaped.
#[derive(Debug)]
pub(super) struct HtmlWithLimit {
buf: String,
len: usize,
limit: usize,
/// A list of tags that have been requested to be opened via [`Self::open_tag()`]
/// but have not actually been pushed to `buf` yet. This ensures that tags are not
/// left empty (e.g., `<em></em>`) due to the length limit being reached.
queued_tags: Vec<&'static str>,
/// A list of all tags that have been opened but not yet closed.
unclosed_tags: Vec<&'static str>,
}
impl HtmlWithLimit {
/// Create a new buffer, with a limit of `length_limit`.
pub(super) fn new(length_limit: usize) -> Self {
let buf = if length_limit > 1000 {
// If the length limit is really large, don't preallocate tons of memory.
String::new()
} else {
// The length limit is actually a good heuristic for initial allocation size.
// Measurements showed that using it as the initial capacity ended up using less memory
// than `String::new`.
// See https://github.com/rust-lang/rust/pull/88173#discussion_r692531631 for more.
String::with_capacity(length_limit)
};
Self {
buf,
len: 0,
limit: length_limit,
unclosed_tags: Vec::new(),
queued_tags: Vec::new(),
}
}
/// Finish using the buffer and get the written output.
/// This function will close all unclosed tags for you.
pub(super) fn finish(mut self) -> String {
self.close_all_tags();
self.buf
}
/// Write some plain text to the buffer, escaping as needed.
///
/// This function skips writing the text if the length limit was reached
/// and returns [`ControlFlow::Break`].
pub(super) fn push(&mut self, text: &str) -> ControlFlow<(), ()> {
if self.len + text.len() > self.limit {
return ControlFlow::BREAK;
}
self.flush_queue();
write!(self.buf, "{}", Escape(text)).unwrap();
self.len += text.len();
ControlFlow::CONTINUE
}
/// Open an HTML tag.
///
/// **Note:** HTML attributes have not yet been implemented.
/// This function will panic if called with a non-alphabetic `tag_name`.
pub(super) fn open_tag(&mut self, tag_name: &'static str) {
assert!(
tag_name.chars().all(|c| ('a'..='z').contains(&c)),
"tag_name contained non-alphabetic chars: {:?}",
tag_name
);
self.queued_tags.push(tag_name);
}
/// Close the most recently opened HTML tag.
pub(super) fn close_tag(&mut self) {
match self.unclosed_tags.pop() {
// Close the most recently opened tag.
Some(tag_name) => write!(self.buf, "</{}>", tag_name).unwrap(),
// There are valid cases where `close_tag()` is called without
// there being any tags to close. For example, this occurs when
|
// a tag is opened after the length limit is exceeded;
// `flush_queue()` will never be called, and thus, the tag will
// not end up being added to `unclosed_tags`.
None => {}
}
}
/// Write all queued tags and add them to the `unclosed_tags` list.
fn flush_queue(&mut self) {
for tag_name in self.queued_tags.drain(..) {
write!(self.buf, "<{}>", tag_name).unwrap();
self.unclosed_tags.push(tag_name);
}
}
/// Close all unclosed tags.
fn close_all_tags(&mut self) {
while !self.unclosed_tags.is_empty() {
self.close_tag();
}
}
}
#[cfg(test)]
mod tests;
|
random_line_split
|
|
length_limit.rs
|
//! See [`HtmlWithLimit`].
use std::fmt::Write;
use std::ops::ControlFlow;
use crate::html::escape::Escape;
/// A buffer that allows generating HTML with a length limit.
///
/// This buffer ensures that:
///
/// * all tags are closed,
/// * tags are closed in the reverse order of when they were opened (i.e., the correct HTML order),
/// * no tags are left empty (e.g., `<em></em>`) due to the length limit being reached,
/// * all text is escaped.
#[derive(Debug)]
pub(super) struct HtmlWithLimit {
buf: String,
len: usize,
limit: usize,
/// A list of tags that have been requested to be opened via [`Self::open_tag()`]
/// but have not actually been pushed to `buf` yet. This ensures that tags are not
/// left empty (e.g., `<em></em>`) due to the length limit being reached.
queued_tags: Vec<&'static str>,
/// A list of all tags that have been opened but not yet closed.
unclosed_tags: Vec<&'static str>,
}
impl HtmlWithLimit {
/// Create a new buffer, with a limit of `length_limit`.
pub(super) fn new(length_limit: usize) -> Self {
let buf = if length_limit > 1000 {
// If the length limit is really large, don't preallocate tons of memory.
String::new()
} else {
// The length limit is actually a good heuristic for initial allocation size.
// Measurements showed that using it as the initial capacity ended up using less memory
// than `String::new`.
// See https://github.com/rust-lang/rust/pull/88173#discussion_r692531631 for more.
String::with_capacity(length_limit)
};
Self {
buf,
len: 0,
limit: length_limit,
unclosed_tags: Vec::new(),
queued_tags: Vec::new(),
}
}
/// Finish using the buffer and get the written output.
/// This function will close all unclosed tags for you.
pub(super) fn finish(mut self) -> String {
self.close_all_tags();
self.buf
}
/// Write some plain text to the buffer, escaping as needed.
///
/// This function skips writing the text if the length limit was reached
/// and returns [`ControlFlow::Break`].
pub(super) fn push(&mut self, text: &str) -> ControlFlow<(), ()> {
if self.len + text.len() > self.limit {
return ControlFlow::BREAK;
}
self.flush_queue();
write!(self.buf, "{}", Escape(text)).unwrap();
self.len += text.len();
ControlFlow::CONTINUE
}
/// Open an HTML tag.
///
/// **Note:** HTML attributes have not yet been implemented.
/// This function will panic if called with a non-alphabetic `tag_name`.
pub(super) fn open_tag(&mut self, tag_name: &'static str) {
assert!(
tag_name.chars().all(|c| ('a'..='z').contains(&c)),
"tag_name contained non-alphabetic chars: {:?}",
tag_name
);
self.queued_tags.push(tag_name);
}
/// Close the most recently opened HTML tag.
pub(super) fn close_tag(&mut self) {
match self.unclosed_tags.pop() {
// Close the most recently opened tag.
Some(tag_name) => write!(self.buf, "</{}>", tag_name).unwrap(),
// There are valid cases where `close_tag()` is called without
// there being any tags to close. For example, this occurs when
// a tag is opened after the length limit is exceeded;
// `flush_queue()` will never be called, and thus, the tag will
// not end up being added to `unclosed_tags`.
None => {}
}
}
/// Write all queued tags and add them to the `unclosed_tags` list.
fn flush_queue(&mut self) {
for tag_name in self.queued_tags.drain(..) {
write!(self.buf, "<{}>", tag_name).unwrap();
self.unclosed_tags.push(tag_name);
}
}
/// Close all unclosed tags.
fn
|
(&mut self) {
while !self.unclosed_tags.is_empty() {
self.close_tag();
}
}
}
#[cfg(test)]
mod tests;
|
close_all_tags
|
identifier_name
|
constraints.py
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
# This must always be in a cross-group transaction, because even if there's only 1 identifider,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
# Backwards compatability: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def
|
(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
""" Delete the given UniqueMarker objects. """
# Note that these should all be from the same Django model instance, and therefore there should
# be a maximum of 25 of them (because everything blows up if you have more than that - limitation)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__overlap" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
continue
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [lookup_kwargs]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
# If we are saving an instance, we ignore it's PK in the result
try:
result.remove(model_class_pk)
except ValueError:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
update
|
identifier_name
|
constraints.py
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
# This must always be in a cross-group transaction, because even if there's only 1 identifider,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
# Backwards compatability: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def update(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
|
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__overlap" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
continue
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [lookup_kwargs]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
# If we are saving an instance, we ignore it's PK in the result
try:
result.remove(model_class_pk)
except ValueError:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
""" Delete the given UniqueMarker objects. """
# Note that these should all be from the same Django model instance, and therefore there should
# be a maximum of 25 of them (because everything blows up if you have more than that - limitation)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
|
identifier_body
|
constraints.py
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
# This must always be in a cross-group transaction, because even if there's only 1 identifider,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
# Backwards compatability: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def update(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
""" Delete the given UniqueMarker objects. """
# Note that these should all be from the same Django model instance, and therefore there should
# be a maximum of 25 of them (because everything blows up if you have more than that - limitation)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__overlap" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
|
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [lookup_kwargs]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
# If we are saving an instance, we ignore it's PK in the result
try:
result.remove(model_class_pk)
except ValueError:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
continue
|
conditional_block
|
constraints.py
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
|
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
# This must always be in a cross-group transaction, because even if there's only 1 identifider,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
# Backwards compatability: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def update(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
""" Delete the given UniqueMarker objects. """
# Note that these should all be from the same Django model instance, and therefore there should
# be a maximum of 25 of them (because everything blows up if you have more than that - limitation)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__overlap" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
continue
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [lookup_kwargs]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
# If we are saving an instance, we ignore it's PK in the result
try:
result.remove(model_class_pk)
except ValueError:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
|
random_line_split
|
tuple_impl.rs
|
//! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
|
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
};
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items() != 1 {
// put in a duplicate item in front of the tuple; this simplifies
// .next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn next(&mut self) -> Option<T> {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
|
impl<T> Iterator for TupleBuffer<T>
|
random_line_split
|
tuple_impl.rs
|
//! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
impl<T> Iterator for TupleBuffer<T>
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
};
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items() != 1 {
// put in a duplicate item in front of the tuple; this simplifies
// .next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn next(&mut self) -> Option<T>
|
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
|
{
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
|
identifier_body
|
tuple_impl.rs
|
//! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
impl<T> Iterator for TupleBuffer<T>
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else
|
;
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items() != 1 {
// put in a duplicate item in front of the tuple; this simplifies
// .next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn next(&mut self) -> Option<T> {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
|
{
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
}
|
conditional_block
|
tuple_impl.rs
|
//! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
impl<T> Iterator for TupleBuffer<T>
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
};
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items() != 1 {
// put in a duplicate item in front of the tuple; this simplifies
// .next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn
|
(&mut self) -> Option<T> {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
|
next
|
identifier_name
|
draft.py
|
from datetime import datetime
from xmodule.modulestore import Location, namedtuple_to_son
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import own_metadata
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore.mongo.base import MongoModuleStore
from pytz import UTC
DRAFT = 'draft'
# Things w/ these categories should never be marked as version='draft'
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
def as_draft(location):
"""
Returns the Location that is the draft for `location`
"""
return Location(location).replace(revision=DRAFT)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return Location(location).replace(revision=None)
def wrap_draft(item):
"""
Sets `item.is_draft` to `True` if the item is a
draft, and `False` otherwise. Sets the item's location to the
non-draft location in either case
"""
setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location.replace(revision=None)
return item
class DraftModuleStore(MongoModuleStore):
"""
This mixin modifies a modulestore to give it draft semantics.
That is, edits made to units are stored to locations that have the revision DRAFT,
and when reads are made, they first read with revision DRAFT, and then fall back
to the baseline revision only if DRAFT doesn't exist.
This module also includes functionality to promote DRAFT modules (and optionally
their children) to published modules.
"""
def get_item(self, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at location.
If location.revision is None, returns the item with the most
recent revision
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth))
def get_instance(self, course_id, location, depth=0):
"""
Get an instance of this location, with policy for course_id applied.
TODO (vshnayder): this may want to live outside the modulestore eventually
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth))
def get_items(self, location, course_id=None, depth=0):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
depth: An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
draft_loc = as_draft(location)
draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth)
items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth)
draft_locs_found = set(item.location.replace(revision=None) for item in draft_items)
non_draft_items = [
item
for item in items
if (item.location.revision != DRAFT
and item.location.replace(revision=None) not in draft_locs_found)
]
return [wrap_draft(item) for item in draft_items + non_draft_items]
def clone_item(self, source, location):
"""
Clone a new item that is a copy of the item at the location `source`
and writes it to `location`
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
return wrap_draft(super(DraftModuleStore, self).clone_item(source, as_draft(location)))
def update_item(self, location, data, allow_not_found=False):
"""
Set the data in the item specified by the location to
data
location: Something that can be passed to Location
data: A nested dictionary of problem data
"""
draft_loc = as_draft(location)
try:
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
except ItemNotFoundError, e:
if not allow_not_found:
raise e
return super(DraftModuleStore, self).update_item(draft_loc, data)
def update_children(self, location, children):
"""
Set the children for the item specified by the location to
children
location: Something that can be passed to Location
children: A list of child item identifiers
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_children(draft_loc, children)
def update_metadata(self, location, metadata):
"""
Set the metadata for the item specified by the location to
metadata
location: Something that can be passed to Location
metadata: A nested dictionary of module metadata
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
if 'is_draft' in metadata:
del metadata['is_draft']
return super(DraftModuleStore, self).update_metadata(draft_loc, metadata)
def delete_item(self, location, delete_all_versions=False):
"""
Delete an item from this modulestore
location: Something that can be passed to Location
"""
super(DraftModuleStore, self).delete_item(as_draft(location))
if delete_all_versions:
super(DraftModuleStore, self).delete_item(as_published(location))
return
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location. Needed
for path_to_location().
returns an iterable of things that can be passed to Location.
'''
return super(DraftModuleStore, self).get_parent_locations(location, course_id)
def publish(self, location, published_by_id):
"""
Save a current draft to the underlying modulestore
"""
draft = self.get_item(location)
draft.cms.published_date = datetime.now(UTC)
draft.cms.published_by = published_by_id
super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data)
super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children)
super(DraftModuleStore, self).update_metadata(location, own_metadata(draft))
self.delete_item(location)
def unpublish(self, location):
"""
Turn the published version into a draft, removing the published version
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
|
super(DraftModuleStore, self).clone_item(location, as_draft(location))
super(DraftModuleStore, self).delete_item(location)
def _query_children_for_cache_children(self, items):
# first get non-draft in a round-trip
queried_children = []
to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(items)
to_process_dict = {}
for non_draft in to_process_non_drafts:
to_process_dict[Location(non_draft["_id"])] = non_draft
# now query all draft content in another round-trip
query = {
'_id': {'$in': [namedtuple_to_son(as_draft(Location(item))) for item in items]}
}
to_process_drafts = list(self.collection.find(query))
# now we have to go through all drafts and replace the non-draft
# with the draft. This is because the semantics of the DraftStore is to
# always return the draft - if available
for draft in to_process_drafts:
draft_loc = Location(draft["_id"])
draft_as_non_draft_loc = draft_loc.replace(revision=None)
# does non-draft exist in the collection
# if so, replace it
if draft_as_non_draft_loc in to_process_dict:
to_process_dict[draft_as_non_draft_loc] = draft
# convert the dict - which is used for look ups - back into a list
for key, value in to_process_dict.iteritems():
queried_children.append(value)
return queried_children
|
raise InvalidVersionError(location)
|
conditional_block
|
draft.py
|
from datetime import datetime
from xmodule.modulestore import Location, namedtuple_to_son
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import own_metadata
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore.mongo.base import MongoModuleStore
from pytz import UTC
DRAFT = 'draft'
# Things w/ these categories should never be marked as version='draft'
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
def as_draft(location):
"""
Returns the Location that is the draft for `location`
"""
return Location(location).replace(revision=DRAFT)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return Location(location).replace(revision=None)
def wrap_draft(item):
"""
Sets `item.is_draft` to `True` if the item is a
draft, and `False` otherwise. Sets the item's location to the
non-draft location in either case
"""
setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location.replace(revision=None)
return item
class DraftModuleStore(MongoModuleStore):
"""
This mixin modifies a modulestore to give it draft semantics.
That is, edits made to units are stored to locations that have the revision DRAFT,
and when reads are made, they first read with revision DRAFT, and then fall back
to the baseline revision only if DRAFT doesn't exist.
This module also includes functionality to promote DRAFT modules (and optionally
their children) to published modules.
"""
def get_item(self, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at location.
If location.revision is None, returns the item with the most
recent revision
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth))
def get_instance(self, course_id, location, depth=0):
"""
Get an instance of this location, with policy for course_id applied.
TODO (vshnayder): this may want to live outside the modulestore eventually
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth))
def
|
(self, location, course_id=None, depth=0):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
depth: An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
draft_loc = as_draft(location)
draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth)
items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth)
draft_locs_found = set(item.location.replace(revision=None) for item in draft_items)
non_draft_items = [
item
for item in items
if (item.location.revision != DRAFT
and item.location.replace(revision=None) not in draft_locs_found)
]
return [wrap_draft(item) for item in draft_items + non_draft_items]
def clone_item(self, source, location):
"""
Clone a new item that is a copy of the item at the location `source`
and writes it to `location`
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
return wrap_draft(super(DraftModuleStore, self).clone_item(source, as_draft(location)))
def update_item(self, location, data, allow_not_found=False):
"""
Set the data in the item specified by the location to
data
location: Something that can be passed to Location
data: A nested dictionary of problem data
"""
draft_loc = as_draft(location)
try:
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
except ItemNotFoundError, e:
if not allow_not_found:
raise e
return super(DraftModuleStore, self).update_item(draft_loc, data)
def update_children(self, location, children):
"""
Set the children for the item specified by the location to
children
location: Something that can be passed to Location
children: A list of child item identifiers
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_children(draft_loc, children)
def update_metadata(self, location, metadata):
"""
Set the metadata for the item specified by the location to
metadata
location: Something that can be passed to Location
metadata: A nested dictionary of module metadata
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
if 'is_draft' in metadata:
del metadata['is_draft']
return super(DraftModuleStore, self).update_metadata(draft_loc, metadata)
def delete_item(self, location, delete_all_versions=False):
"""
Delete an item from this modulestore
location: Something that can be passed to Location
"""
super(DraftModuleStore, self).delete_item(as_draft(location))
if delete_all_versions:
super(DraftModuleStore, self).delete_item(as_published(location))
return
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location. Needed
for path_to_location().
returns an iterable of things that can be passed to Location.
'''
return super(DraftModuleStore, self).get_parent_locations(location, course_id)
def publish(self, location, published_by_id):
"""
Save a current draft to the underlying modulestore
"""
draft = self.get_item(location)
draft.cms.published_date = datetime.now(UTC)
draft.cms.published_by = published_by_id
super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data)
super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children)
super(DraftModuleStore, self).update_metadata(location, own_metadata(draft))
self.delete_item(location)
def unpublish(self, location):
"""
Turn the published version into a draft, removing the published version
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
super(DraftModuleStore, self).clone_item(location, as_draft(location))
super(DraftModuleStore, self).delete_item(location)
def _query_children_for_cache_children(self, items):
# first get non-draft in a round-trip
queried_children = []
to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(items)
to_process_dict = {}
for non_draft in to_process_non_drafts:
to_process_dict[Location(non_draft["_id"])] = non_draft
# now query all draft content in another round-trip
query = {
'_id': {'$in': [namedtuple_to_son(as_draft(Location(item))) for item in items]}
}
to_process_drafts = list(self.collection.find(query))
# now we have to go through all drafts and replace the non-draft
# with the draft. This is because the semantics of the DraftStore is to
# always return the draft - if available
for draft in to_process_drafts:
draft_loc = Location(draft["_id"])
draft_as_non_draft_loc = draft_loc.replace(revision=None)
# does non-draft exist in the collection
# if so, replace it
if draft_as_non_draft_loc in to_process_dict:
to_process_dict[draft_as_non_draft_loc] = draft
# convert the dict - which is used for look ups - back into a list
for key, value in to_process_dict.iteritems():
queried_children.append(value)
return queried_children
|
get_items
|
identifier_name
|
draft.py
|
from datetime import datetime
from xmodule.modulestore import Location, namedtuple_to_son
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import own_metadata
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore.mongo.base import MongoModuleStore
from pytz import UTC
DRAFT = 'draft'
# Things w/ these categories should never be marked as version='draft'
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
def as_draft(location):
"""
Returns the Location that is the draft for `location`
"""
return Location(location).replace(revision=DRAFT)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return Location(location).replace(revision=None)
def wrap_draft(item):
"""
Sets `item.is_draft` to `True` if the item is a
draft, and `False` otherwise. Sets the item's location to the
non-draft location in either case
"""
setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location.replace(revision=None)
return item
class DraftModuleStore(MongoModuleStore):
"""
This mixin modifies a modulestore to give it draft semantics.
That is, edits made to units are stored to locations that have the revision DRAFT,
and when reads are made, they first read with revision DRAFT, and then fall back
to the baseline revision only if DRAFT doesn't exist.
This module also includes functionality to promote DRAFT modules (and optionally
their children) to published modules.
"""
def get_item(self, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at location.
If location.revision is None, returns the item with the most
recent revision
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth))
def get_instance(self, course_id, location, depth=0):
"""
Get an instance of this location, with policy for course_id applied.
TODO (vshnayder): this may want to live outside the modulestore eventually
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth))
def get_items(self, location, course_id=None, depth=0):
|
def clone_item(self, source, location):
"""
Clone a new item that is a copy of the item at the location `source`
and writes it to `location`
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
return wrap_draft(super(DraftModuleStore, self).clone_item(source, as_draft(location)))
def update_item(self, location, data, allow_not_found=False):
"""
Set the data in the item specified by the location to
data
location: Something that can be passed to Location
data: A nested dictionary of problem data
"""
draft_loc = as_draft(location)
try:
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
except ItemNotFoundError, e:
if not allow_not_found:
raise e
return super(DraftModuleStore, self).update_item(draft_loc, data)
def update_children(self, location, children):
"""
Set the children for the item specified by the location to
children
location: Something that can be passed to Location
children: A list of child item identifiers
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_children(draft_loc, children)
def update_metadata(self, location, metadata):
"""
Set the metadata for the item specified by the location to
metadata
location: Something that can be passed to Location
metadata: A nested dictionary of module metadata
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
if 'is_draft' in metadata:
del metadata['is_draft']
return super(DraftModuleStore, self).update_metadata(draft_loc, metadata)
def delete_item(self, location, delete_all_versions=False):
"""
Delete an item from this modulestore
location: Something that can be passed to Location
"""
super(DraftModuleStore, self).delete_item(as_draft(location))
if delete_all_versions:
super(DraftModuleStore, self).delete_item(as_published(location))
return
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location. Needed
for path_to_location().
returns an iterable of things that can be passed to Location.
'''
return super(DraftModuleStore, self).get_parent_locations(location, course_id)
def publish(self, location, published_by_id):
"""
Save a current draft to the underlying modulestore
"""
draft = self.get_item(location)
draft.cms.published_date = datetime.now(UTC)
draft.cms.published_by = published_by_id
super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data)
super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children)
super(DraftModuleStore, self).update_metadata(location, own_metadata(draft))
self.delete_item(location)
def unpublish(self, location):
"""
Turn the published version into a draft, removing the published version
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
super(DraftModuleStore, self).clone_item(location, as_draft(location))
super(DraftModuleStore, self).delete_item(location)
def _query_children_for_cache_children(self, items):
# first get non-draft in a round-trip
queried_children = []
to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(items)
to_process_dict = {}
for non_draft in to_process_non_drafts:
to_process_dict[Location(non_draft["_id"])] = non_draft
# now query all draft content in another round-trip
query = {
'_id': {'$in': [namedtuple_to_son(as_draft(Location(item))) for item in items]}
}
to_process_drafts = list(self.collection.find(query))
# now we have to go through all drafts and replace the non-draft
# with the draft. This is because the semantics of the DraftStore is to
# always return the draft - if available
for draft in to_process_drafts:
draft_loc = Location(draft["_id"])
draft_as_non_draft_loc = draft_loc.replace(revision=None)
# does non-draft exist in the collection
# if so, replace it
if draft_as_non_draft_loc in to_process_dict:
to_process_dict[draft_as_non_draft_loc] = draft
# convert the dict - which is used for look ups - back into a list
for key, value in to_process_dict.iteritems():
queried_children.append(value)
return queried_children
|
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
depth: An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
draft_loc = as_draft(location)
draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth)
items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth)
draft_locs_found = set(item.location.replace(revision=None) for item in draft_items)
non_draft_items = [
item
for item in items
if (item.location.revision != DRAFT
and item.location.replace(revision=None) not in draft_locs_found)
]
return [wrap_draft(item) for item in draft_items + non_draft_items]
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.