file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 values)
---|---|---|---|---|
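Each row below is one fill-in-the-middle (FIM) example: a source file split into a `prefix`, a held-out `middle`, and a `suffix`, plus a `fim_type` label naming how the hole was carved (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). Concatenating the three spans in document order reproduces the original file. A minimal sketch; the toy row values are invented for illustration:

```python
def reassemble(row: dict) -> str:
    """Rebuild the original file text: prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Invented toy row, shaped like the columns above.
row = {
    "file_name": "example.js",
    "prefix": "function ",
    "middle": "add",  # the held-out span a model must fill in
    "suffix": "(a, b) { return a + b; }",
    "fim_type": "identifier_name",
}
assert reassemble(row) == "function add(a, b) { return a + b; }"
```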
FloatingLabel.js
|
import React, { PureComponent } from 'react';
import PropTypes from 'prop-types';
import { Animated, StyleSheet } from 'react-native';
import { H6 } from '@ui/typography';
import styled from '@ui/styled';
export const LabelText = styled(({ theme }) => ({
color: theme.colors.text.secondary,
backgroundColor: 'transparent',
paddingVertical: theme.sizing.baseUnit / 4,
}), 'FloatingLabel.LabelText')(H6);
const styles = StyleSheet.create({
floatLabelView: {
position: 'absolute',
bottom: 0,
top: 0,
justifyContent: 'center',
},
});
class FloatingLabel extends PureComponent {
static propTypes = {
children: PropTypes.node,
animation: PropTypes.shape({
interpolate: PropTypes.func,
}),
scaleSize: PropTypes.number, // how much smaller to make label when focused
floatingOpacity: PropTypes.number,
};
static defaultProps = {
animation: new Animated.Value(0),
scaleSize: 0.8,
floatingOpacity: 0.8,
};
state = {
labelWidth: 0,
labelHeight: 0,
};
handleLayout = ({ nativeEvent: { layout } }) => {
this.setState({
labelWidth: layout.width,
labelHeight: layout.height,
});
};
|
() {
const scaledWidth = this.state.labelWidth * (1.05 - this.props.scaleSize);
const sideScaledWidth = scaledWidth / 2;
const scale = this.props.animation.interpolate({
inputRange: [0, 1],
outputRange: [1, this.props.scaleSize],
});
const opacity = this.props.animation.interpolate({
inputRange: [0, 1],
outputRange: [1, this.props.floatingOpacity],
});
const translateY = this.props.animation.interpolate({
inputRange: [0, 1],
outputRange: [0, -(this.state.labelHeight * 0.7)],
});
const translateX = this.props.animation.interpolate({
inputRange: [0, 1],
outputRange: [0, -sideScaledWidth],
});
const wrapperStyles = {
transform: [{ scale }, { translateX }, { translateY }],
opacity,
};
return (
<Animated.View
pointerEvents="none"
onLayout={this.handleLayout}
style={[styles.floatLabelView, wrapperStyles]}
>
<LabelText>
{this.props.children}
</LabelText>
</Animated.View>
);
}
}
export default FloatingLabel;
|
render
|
identifier_name
|
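The row above holds out a method name (`render`) as an `identifier_name` example. To train or evaluate on such rows, they are commonly serialized into a prefix-suffix-middle prompt; the sentinel strings in this sketch follow one common convention and are an assumption here, not something this dataset prescribes:

```python
def to_psm_prompt(row,
                  pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    """Serialize a row into a PSM-style FIM prompt; the model is then
    expected to generate row['middle'] after the final sentinel."""
    return f"{pre}{row['prefix']}{suf}{row['suffix']}{mid}"
```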
setup.py
|
#!/usr/bin/env python3
from setuptools import setup
setup(
name='SecFS',
version='0.1.0',
description='6.858 final project --- an encrypted and authenticated file system',
long_description= open('README.md', 'r').read(),
author='Jon Gjengset',
author_email='[email protected]',
maintainer='MIT PDOS',
|
scripts=['bin/secfs-server', 'bin/secfs-fuse'],
license='MIT',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Education",
"Topic :: Security",
"Topic :: System :: Filesystems",
]
)
|
maintainer_email='[email protected]',
url='https://github.com/mit-pdos/6.858-secfs',
packages=['secfs', 'secfs.store'],
install_requires=['llfuse', 'Pyro4', 'serpent', 'cryptography'],
|
random_line_split
|
build.js
|
#!/usr/bin/env node
/**
* Build script for /tg/station 13 codebase.
*
* This script uses Juke Build, read the docs here:
* https://github.com/stylemistake/juke-build
*
* @file
* @copyright 2021 Aleksej Komarov
* @license MIT
*/
import fs from 'fs';
import { DreamDaemon, DreamMaker } from './lib/byond.js';
import { yarn } from './lib/yarn.js';
import Juke from './juke/index.js';
Juke.chdir('../..', import.meta.url);
Juke.setup({ file: import.meta.url }).then((code) => process.exit(code));
const DME_NAME = 'tgstation';
export const DefineParameter = new Juke.Parameter({
type: 'string[]',
alias: 'D',
});
export const PortParameter = new Juke.Parameter({
type: 'string',
alias: 'p',
});
export const CiParameter = new Juke.Parameter({
type: 'boolean',
});
export const DmMapsIncludeTarget = new Juke.Target({
executes: async () => {
const folders = [
...Juke.glob('_maps/RandomRuins/**/*.dmm'),
...Juke.glob('_maps/RandomZLevels/**/*.dmm'),
...Juke.glob('_maps/shuttles/**/*.dmm'),
...Juke.glob('_maps/templates/**/*.dmm'),
];
const content = folders
.map((file) => file.replace('_maps/', ''))
.map((file) => `#include "${file}"`)
.join('\n') + '\n';
fs.writeFileSync('_maps/templates.dm', content);
},
});
export const DmTarget = new Juke.Target({
dependsOn: ({ get }) => [
get(DefineParameter).includes('ALL_MAPS') && DmMapsIncludeTarget,
],
inputs: [
'_maps/map_files/generic/**',
'code/**',
'goon/**',
'html/**',
'icons/**',
'interface/**',
`${DME_NAME}.dme`,
],
outputs: [
`${DME_NAME}.dmb`,
`${DME_NAME}.rsc`,
],
parameters: [DefineParameter],
executes: async ({ get }) => {
const defines = get(DefineParameter);
if (defines.length > 0)
|
await DreamMaker(`${DME_NAME}.dme`, {
defines: ['CBT', ...defines],
});
},
});
export const DmTestTarget = new Juke.Target({
dependsOn: ({ get }) => [
get(DefineParameter).includes('ALL_MAPS') && DmMapsIncludeTarget,
],
executes: async ({ get }) => {
const defines = get(DefineParameter);
if (defines.length > 0) {
Juke.logger.info('Using defines:', defines.join(', '));
}
fs.copyFileSync(`${DME_NAME}.dme`, `${DME_NAME}.test.dme`);
await DreamMaker(`${DME_NAME}.test.dme`, {
defines: ['CBT', 'CIBUILDING', ...defines],
});
Juke.rm('data/logs/ci', { recursive: true });
await DreamDaemon(
`${DME_NAME}.test.dmb`,
'-close', '-trusted', '-verbose',
'-params', 'log-directory=ci'
);
Juke.rm('*.test.*');
try {
const cleanRun = fs.readFileSync('data/logs/ci/clean_run.lk', 'utf-8');
console.log(cleanRun);
}
catch (err) {
Juke.logger.error('Test run was not clean, exiting');
throw new Juke.ExitCode(1);
}
},
});
export const YarnTarget = new Juke.Target({
inputs: [
'tgui/.yarn/+(cache|releases|plugins|sdks)/**/*',
'tgui/**/package.json',
'tgui/yarn.lock',
],
outputs: [
'tgui/.yarn/install-target',
],
executes: async () => {
await yarn('install');
},
});
export const TgFontTarget = new Juke.Target({
dependsOn: [YarnTarget],
inputs: [
'tgui/.yarn/install-target',
'tgui/packages/tgfont/**/*.+(js|cjs|svg)',
'tgui/packages/tgfont/package.json',
],
outputs: [
'tgui/packages/tgfont/dist/tgfont.css',
'tgui/packages/tgfont/dist/tgfont.eot',
'tgui/packages/tgfont/dist/tgfont.woff2',
],
executes: async () => {
await yarn('workspace', 'tgfont', 'build');
},
});
export const TguiTarget = new Juke.Target({
dependsOn: [YarnTarget],
inputs: [
'tgui/.yarn/install-target',
'tgui/webpack.config.js',
'tgui/**/package.json',
'tgui/packages/**/*.+(js|cjs|ts|tsx|scss)',
],
outputs: [
'tgui/public/tgui.bundle.css',
'tgui/public/tgui.bundle.js',
'tgui/public/tgui-panel.bundle.css',
'tgui/public/tgui-panel.bundle.js',
],
executes: async () => {
await yarn('webpack-cli', '--mode=production');
},
});
export const TguiEslintTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async ({ args }) => {
await yarn(
'eslint', 'packages',
'--fix', '--ext', '.js,.cjs,.ts,.tsx',
...args
);
},
});
export const TguiTscTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async () => {
await yarn('tsc');
},
});
export const TguiTestTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async ({ args }) => {
await yarn('jest', ...args);
},
});
export const TguiLintTarget = new Juke.Target({
dependsOn: [YarnTarget, TguiEslintTarget, TguiTscTarget, TguiTestTarget],
});
export const TguiDevTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async ({ args }) => {
await yarn('node', 'packages/tgui-dev-server/index.js', ...args);
},
});
export const TguiAnalyzeTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async () => {
await yarn('webpack-cli', '--mode=production', '--analyze');
},
});
export const TestTarget = new Juke.Target({
dependsOn: [DmTestTarget, TguiTestTarget],
});
export const LintTarget = new Juke.Target({
dependsOn: [TguiLintTarget],
});
export const BuildTarget = new Juke.Target({
dependsOn: [TguiTarget, TgFontTarget, DmTarget],
});
export const ServerTarget = new Juke.Target({
dependsOn: [BuildTarget],
executes: async ({ get }) => {
const port = get(PortParameter) || '1337';
await DreamDaemon(`${DME_NAME}.dmb`, port, '-trusted');
},
});
export const AllTarget = new Juke.Target({
dependsOn: [TestTarget, LintTarget, BuildTarget],
});
/**
* Removes the immediate build junk to produce clean builds.
*/
export const CleanTarget = new Juke.Target({
executes: async () => {
Juke.rm('*.dmb');
Juke.rm('*.rsc');
Juke.rm('*.mdme');
Juke.rm('*.mdme*');
Juke.rm('*.m.*');
Juke.rm('_maps/templates.dm');
Juke.rm('tgui/public/.tmp', { recursive: true });
Juke.rm('tgui/public/*.map');
Juke.rm('tgui/public/*.chunk.*');
Juke.rm('tgui/public/*.bundle.*');
Juke.rm('tgui/public/*.hot-update.*');
Juke.rm('tgui/packages/tgfont/dist', { recursive: true });
Juke.rm('tgui/.yarn/cache', { recursive: true });
Juke.rm('tgui/.yarn/unplugged', { recursive: true });
Juke.rm('tgui/.yarn/webpack', { recursive: true });
Juke.rm('tgui/.yarn/build-state.yml');
Juke.rm('tgui/.yarn/install-state.gz');
Juke.rm('tgui/.yarn/install-target');
Juke.rm('tgui/.pnp.*');
},
});
/**
* Removes more junk at expense of much slower initial builds.
*/
export const DistCleanTarget = new Juke.Target({
dependsOn: [CleanTarget],
executes: async () => {
Juke.logger.info('Cleaning up data/logs');
Juke.rm('data/logs', { recursive: true });
Juke.logger.info('Cleaning up bootstrap cache');
Juke.rm('tools/bootstrap/.cache', { recursive: true });
Juke.logger.info('Cleaning up global yarn cache');
await yarn('cache', 'clean', '--all');
},
});
/**
* Prepends the defines to the .dme.
* Does not clean them up, as this is intended for TGS which
* clones new copies anyway.
*/
const prependDefines = (...defines) => {
const dmeContents = fs.readFileSync(`${DME_NAME}.dme`);
const textToWrite = defines.map(define => `#define ${define}\n`);
fs.writeFileSync(`${DME_NAME}.dme`, `${textToWrite}\n${dmeContents}`);
};
export const TgsTarget = new Juke.Target({
dependsOn: [TguiTarget, TgFontTarget],
executes: async () => {
Juke.logger.info('Prepending TGS define');
prependDefines('TGS');
},
});
const TGS_MODE = process.env.CBT_BUILD_MODE === 'TGS';
export default TGS_MODE ? TgsTarget : BuildTarget;
|
{
Juke.logger.info('Using defines:', defines.join(', '));
}
|
conditional_block
|
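`DmTarget` in the row above declares `inputs` and `outputs` globs, which is how Juke Build decides whether a target needs to rebuild. Juke's exact algorithm is not shown in this file; the usual mtime comparison such declarations imply looks roughly like this sketch:

```python
import glob
import os

def is_stale(inputs, outputs):
    """A target is stale when any declared output is missing or older
    than the newest matching input file."""
    in_files = [f for pat in inputs for f in glob.glob(pat, recursive=True)]
    out_files = [f for pat in outputs for f in glob.glob(pat, recursive=True)]
    if not out_files:
        return True  # outputs missing: must build
    newest_input = max((os.path.getmtime(f) for f in in_files), default=0.0)
    oldest_output = min(os.path.getmtime(f) for f in out_files)
    return newest_input > oldest_output
```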
build.js
|
#!/usr/bin/env node
/**
* Build script for /tg/station 13 codebase.
*
* This script uses Juke Build, read the docs here:
* https://github.com/stylemistake/juke-build
*
* @file
* @copyright 2021 Aleksej Komarov
* @license MIT
*/
import fs from 'fs';
import { DreamDaemon, DreamMaker } from './lib/byond.js';
import { yarn } from './lib/yarn.js';
import Juke from './juke/index.js';
Juke.chdir('../..', import.meta.url);
Juke.setup({ file: import.meta.url }).then((code) => process.exit(code));
const DME_NAME = 'tgstation';
export const DefineParameter = new Juke.Parameter({
type: 'string[]',
alias: 'D',
});
export const PortParameter = new Juke.Parameter({
type: 'string',
alias: 'p',
});
export const CiParameter = new Juke.Parameter({
type: 'boolean',
});
export const DmMapsIncludeTarget = new Juke.Target({
executes: async () => {
const folders = [
...Juke.glob('_maps/RandomRuins/**/*.dmm'),
...Juke.glob('_maps/RandomZLevels/**/*.dmm'),
...Juke.glob('_maps/shuttles/**/*.dmm'),
...Juke.glob('_maps/templates/**/*.dmm'),
];
const content = folders
.map((file) => file.replace('_maps/', ''))
.map((file) => `#include "${file}"`)
.join('\n') + '\n';
fs.writeFileSync('_maps/templates.dm', content);
},
});
export const DmTarget = new Juke.Target({
dependsOn: ({ get }) => [
get(DefineParameter).includes('ALL_MAPS') && DmMapsIncludeTarget,
],
inputs: [
'_maps/map_files/generic/**',
'code/**',
'goon/**',
'html/**',
'icons/**',
'interface/**',
`${DME_NAME}.dme`,
],
outputs: [
`${DME_NAME}.dmb`,
`${DME_NAME}.rsc`,
],
parameters: [DefineParameter],
executes: async ({ get }) => {
const defines = get(DefineParameter);
if (defines.length > 0) {
Juke.logger.info('Using defines:', defines.join(', '));
}
await DreamMaker(`${DME_NAME}.dme`, {
defines: ['CBT', ...defines],
});
},
});
export const DmTestTarget = new Juke.Target({
dependsOn: ({ get }) => [
get(DefineParameter).includes('ALL_MAPS') && DmMapsIncludeTarget,
],
executes: async ({ get }) => {
const defines = get(DefineParameter);
if (defines.length > 0) {
Juke.logger.info('Using defines:', defines.join(', '));
}
fs.copyFileSync(`${DME_NAME}.dme`, `${DME_NAME}.test.dme`);
await DreamMaker(`${DME_NAME}.test.dme`, {
defines: ['CBT', 'CIBUILDING', ...defines],
});
Juke.rm('data/logs/ci', { recursive: true });
await DreamDaemon(
`${DME_NAME}.test.dmb`,
'-close', '-trusted', '-verbose',
'-params', 'log-directory=ci'
);
Juke.rm('*.test.*');
try {
const cleanRun = fs.readFileSync('data/logs/ci/clean_run.lk', 'utf-8');
console.log(cleanRun);
}
catch (err) {
Juke.logger.error('Test run was not clean, exiting');
throw new Juke.ExitCode(1);
}
},
});
export const YarnTarget = new Juke.Target({
inputs: [
'tgui/.yarn/+(cache|releases|plugins|sdks)/**/*',
'tgui/**/package.json',
'tgui/yarn.lock',
],
outputs: [
'tgui/.yarn/install-target',
],
executes: async () => {
await yarn('install');
},
});
export const TgFontTarget = new Juke.Target({
dependsOn: [YarnTarget],
inputs: [
'tgui/.yarn/install-target',
'tgui/packages/tgfont/**/*.+(js|cjs|svg)',
'tgui/packages/tgfont/package.json',
],
outputs: [
'tgui/packages/tgfont/dist/tgfont.css',
'tgui/packages/tgfont/dist/tgfont.eot',
'tgui/packages/tgfont/dist/tgfont.woff2',
],
executes: async () => {
await yarn('workspace', 'tgfont', 'build');
},
});
export const TguiTarget = new Juke.Target({
dependsOn: [YarnTarget],
inputs: [
'tgui/.yarn/install-target',
'tgui/webpack.config.js',
'tgui/**/package.json',
'tgui/packages/**/*.+(js|cjs|ts|tsx|scss)',
],
outputs: [
'tgui/public/tgui.bundle.css',
'tgui/public/tgui.bundle.js',
'tgui/public/tgui-panel.bundle.css',
'tgui/public/tgui-panel.bundle.js',
],
executes: async () => {
await yarn('webpack-cli', '--mode=production');
},
});
export const TguiEslintTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async ({ args }) => {
await yarn(
'eslint', 'packages',
'--fix', '--ext', '.js,.cjs,.ts,.tsx',
...args
);
},
});
export const TguiTscTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async () => {
await yarn('tsc');
},
});
export const TguiTestTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async ({ args }) => {
await yarn('jest', ...args);
},
});
export const TguiLintTarget = new Juke.Target({
dependsOn: [YarnTarget, TguiEslintTarget, TguiTscTarget, TguiTestTarget],
});
export const TguiDevTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async ({ args }) => {
await yarn('node', 'packages/tgui-dev-server/index.js', ...args);
},
});
export const TguiAnalyzeTarget = new Juke.Target({
dependsOn: [YarnTarget],
executes: async () => {
await yarn('webpack-cli', '--mode=production', '--analyze');
},
});
export const TestTarget = new Juke.Target({
dependsOn: [DmTestTarget, TguiTestTarget],
});
export const LintTarget = new Juke.Target({
dependsOn: [TguiLintTarget],
});
export const BuildTarget = new Juke.Target({
dependsOn: [TguiTarget, TgFontTarget, DmTarget],
});
export const ServerTarget = new Juke.Target({
dependsOn: [BuildTarget],
executes: async ({ get }) => {
const port = get(PortParameter) || '1337';
await DreamDaemon(`${DME_NAME}.dmb`, port, '-trusted');
},
});
export const AllTarget = new Juke.Target({
dependsOn: [TestTarget, LintTarget, BuildTarget],
});
/**
* Removes the immediate build junk to produce clean builds.
*/
export const CleanTarget = new Juke.Target({
executes: async () => {
Juke.rm('*.dmb');
Juke.rm('*.rsc');
Juke.rm('*.mdme');
Juke.rm('*.mdme*');
Juke.rm('*.m.*');
Juke.rm('_maps/templates.dm');
Juke.rm('tgui/public/.tmp', { recursive: true });
Juke.rm('tgui/public/*.map');
|
Juke.rm('tgui/public/*.bundle.*');
Juke.rm('tgui/public/*.hot-update.*');
Juke.rm('tgui/packages/tgfont/dist', { recursive: true });
Juke.rm('tgui/.yarn/cache', { recursive: true });
Juke.rm('tgui/.yarn/unplugged', { recursive: true });
Juke.rm('tgui/.yarn/webpack', { recursive: true });
Juke.rm('tgui/.yarn/build-state.yml');
Juke.rm('tgui/.yarn/install-state.gz');
Juke.rm('tgui/.yarn/install-target');
Juke.rm('tgui/.pnp.*');
},
});
/**
* Removes more junk at expense of much slower initial builds.
*/
export const DistCleanTarget = new Juke.Target({
dependsOn: [CleanTarget],
executes: async () => {
Juke.logger.info('Cleaning up data/logs');
Juke.rm('data/logs', { recursive: true });
Juke.logger.info('Cleaning up bootstrap cache');
Juke.rm('tools/bootstrap/.cache', { recursive: true });
Juke.logger.info('Cleaning up global yarn cache');
await yarn('cache', 'clean', '--all');
},
});
/**
* Prepends the defines to the .dme.
* Does not clean them up, as this is intended for TGS which
* clones new copies anyway.
*/
const prependDefines = (...defines) => {
const dmeContents = fs.readFileSync(`${DME_NAME}.dme`);
const textToWrite = defines.map(define => `#define ${define}\n`);
fs.writeFileSync(`${DME_NAME}.dme`, `${textToWrite}\n${dmeContents}`);
};
export const TgsTarget = new Juke.Target({
dependsOn: [TguiTarget, TgFontTarget],
executes: async () => {
Juke.logger.info('Prepending TGS define');
prependDefines('TGS');
},
});
const TGS_MODE = process.env.CBT_BUILD_MODE === 'TGS';
export default TGS_MODE ? TgsTarget : BuildTarget;
|
Juke.rm('tgui/public/*.chunk.*');
|
random_line_split
|
navMenu.tsx
|
import * as React from 'react';
import * as ReactDOM from 'react-dom'
import { Link } from 'react-router-dom';
import { Router } from 'react-router';
import { Components, Services } from './../../root';
import { observer } from './../../mx';
@observer
export class NavMenu extends React.Component<any, any> {
public async
|
() {
var user = await Services.CurrentUserService.get();
}
public render() {
var progress = undefined;
if (Services.MessengerService.numberOfPendingHttpRequest != 0)
progress = <div role="progressbar" className="mdc-linear-progress mdc-linear-progress--indeterminate mdc-linear-progress--accent progress">
<div className="mdc-linear-progress__buffering-dots"></div>
<div className="mdc-linear-progress__buffer"></div>
<div className="mdc-linear-progress__bar mdc-linear-progress__primary-bar">
<span className="mdc-linear-progress__bar-inner"></span>
</div>
<div className="mdc-linear-progress__bar mdc-linear-progress__secondary-bar">
<span className="mdc-linear-progress__bar-inner"></span>
</div>
</div>;
return (<div>
<header className="mdc-toolbar mdc-toolbar--fixed mdc-toolbar--waterfall">
<div className="mdc-toolbar__row">
<section className="mdc-toolbar__section mdc-toolbar__section--align-start">
<a href="/" className="mdc-toolbar__title">React</a>
</section>
<section className="mdc-toolbar__section mdc-toolbar__section--align-end" role="toolbar">
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Home"
href="/#/home"><i className='mdi mdi-home'></i></a>
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Users"
href="/#/user-list"><i className='mdi mdi-account-multiple'></i></a>
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Scratch"
href="/#/scratch"><i className='mdi mdi-theater'></i></a>
</section>
</div>
<div className="progress-container">
{progress}
</div>
</header>
</div>
);
}
}
|
componentDidMount
|
identifier_name
|
navMenu.tsx
|
import * as React from 'react';
import * as ReactDOM from 'react-dom'
import { Link } from 'react-router-dom';
import { Router } from 'react-router';
import { Components, Services } from './../../root';
import { observer } from './../../mx';
@observer
export class NavMenu extends React.Component<any, any> {
public async componentDidMount() {
var user = await Services.CurrentUserService.get();
}
public render() {
var progress = undefined;
if (Services.MessengerService.numberOfPendingHttpRequest != 0)
progress = <div role="progressbar" className="mdc-linear-progress mdc-linear-progress--indeterminate mdc-linear-progress--accent progress">
<div className="mdc-linear-progress__buffering-dots"></div>
<div className="mdc-linear-progress__buffer"></div>
<div className="mdc-linear-progress__bar mdc-linear-progress__primary-bar">
<span className="mdc-linear-progress__bar-inner"></span>
</div>
|
<div className="mdc-linear-progress__bar mdc-linear-progress__secondary-bar">
<span className="mdc-linear-progress__bar-inner"></span>
</div>
</div>;
return (<div>
<header className="mdc-toolbar mdc-toolbar--fixed mdc-toolbar--waterfall">
<div className="mdc-toolbar__row">
<section className="mdc-toolbar__section mdc-toolbar__section--align-start">
<a href="/" className="mdc-toolbar__title">React</a>
</section>
<section className="mdc-toolbar__section mdc-toolbar__section--align-end" role="toolbar">
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Home"
href="/#/home"><i className='mdi mdi-home'></i></a>
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Users"
href="/#/user-list"><i className='mdi mdi-account-multiple'></i></a>
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Scratch"
href="/#/scratch"><i className='mdi mdi-theater'></i></a>
</section>
</div>
<div className="progress-container">
{progress}
</div>
</header>
</div>
);
}
}
|
random_line_split
|
|
navMenu.tsx
|
import * as React from 'react';
import * as ReactDOM from 'react-dom'
import { Link } from 'react-router-dom';
import { Router } from 'react-router';
import { Components, Services } from './../../root';
import { observer } from './../../mx';
@observer
export class NavMenu extends React.Component<any, any> {
public async componentDidMount()
|
public render() {
var progress = undefined;
if (Services.MessengerService.numberOfPendingHttpRequest != 0)
progress = <div role="progressbar" className="mdc-linear-progress mdc-linear-progress--indeterminate mdc-linear-progress--accent progress">
<div className="mdc-linear-progress__buffering-dots"></div>
<div className="mdc-linear-progress__buffer"></div>
<div className="mdc-linear-progress__bar mdc-linear-progress__primary-bar">
<span className="mdc-linear-progress__bar-inner"></span>
</div>
<div className="mdc-linear-progress__bar mdc-linear-progress__secondary-bar">
<span className="mdc-linear-progress__bar-inner"></span>
</div>
</div>;
return (<div>
<header className="mdc-toolbar mdc-toolbar--fixed mdc-toolbar--waterfall">
<div className="mdc-toolbar__row">
<section className="mdc-toolbar__section mdc-toolbar__section--align-start">
<a href="/" className="mdc-toolbar__title">React</a>
</section>
<section className="mdc-toolbar__section mdc-toolbar__section--align-end" role="toolbar">
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Home"
href="/#/home"><i className='mdi mdi-home'></i></a>
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Users"
href="/#/user-list"><i className='mdi mdi-account-multiple'></i></a>
<a className="toolbar-button material-icons mdc-toolbar__icon" title="Scratch"
href="/#/scratch"><i className='mdi mdi-theater'></i></a>
</section>
</div>
<div className="progress-container">
{progress}
</div>
</header>
</div>
);
}
}
|
{
var user = await Services.CurrentUserService.get();
}
|
identifier_body
|
sortedIndex.js
|
/**
* Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/>
* Build: `lodash modularize modern exports="amd" -o ./modern/`
* Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/>
* Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE>
* Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
* Available under MIT license <http://lodash.com/license>
*/
define(['../functions/createCallback', '../utilities/identity'], function(createCallback, identity) {
/**
* Uses a binary search to determine the smallest index at which a value
* should be inserted into a given sorted array in order to maintain the sort
* order of the array. If a callback is provided it will be executed for
* `value` and each element of `array` to compute their sort ranking. The
* callback is bound to `thisArg` and invoked with one argument; (value).
*
* If a property name is provided for `callback` the created "_.pluck" style
* callback will return the property value of the given element.
*
* If an object is provided for `callback` the created "_.where" style callback
* will return `true` for elements that have the properties of the given object,
* else `false`.
*
* @static
* @memberOf _
* @category Arrays
* @param {Array} array The array to inspect.
* @param {*} value The value to evaluate.
* @param {Function|Object|string} [callback=identity] The function called
* per iteration. If a property name or object is provided it will be used
* to create a "_.pluck" or "_.where" style callback, respectively.
* @param {*} [thisArg] The `this` binding of `callback`.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedIndex([20, 30, 50], 40);
* // => 2
*
* // using "_.pluck" callback shorthand
* _.sortedIndex([{ 'x': 20 }, { 'x': 30 }, { 'x': 50 }], { 'x': 40 }, 'x');
* // => 2
*
* var dict = {
* 'wordToNumber': { 'twenty': 20, 'thirty': 30, 'fourty': 40, 'fifty': 50 }
* };
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return dict.wordToNumber[word];
* });
* // => 2
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return this.wordToNumber[word];
* }, dict);
* // => 2
*/
function sortedIndex(array, value, callback, thisArg) {
var low = 0,
high = array ? array.length : low;
// explicitly reference `identity` for better inlining in Firefox
|
while (low < high) {
var mid = (low + high) >>> 1;
(callback(array[mid]) < value)
? low = mid + 1
: high = mid;
}
return low;
}
return sortedIndex;
});
|
callback = callback ? createCallback(callback, thisArg, 1) : identity;
value = callback(value);
|
random_line_split
|
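The docblock in this row describes the classic lower-bound binary search, and the held-out `middle` is the two lines that normalize `callback` and `value` before the loop. For reference, the same loop as a minimal Python sketch (the unsigned shift `>>> 1` becomes floor division):

```python
def sorted_index(array, value, key=lambda x: x):
    """Smallest index at which value can be inserted to keep array sorted."""
    target = key(value)
    low, high = 0, len(array)
    while low < high:
        mid = (low + high) // 2
        if key(array[mid]) < target:
            low = mid + 1
        else:
            high = mid
    return low

assert sorted_index([20, 30, 50], 40) == 2  # matches the docblock example
```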
sortedIndex.js
|
/**
* Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/>
* Build: `lodash modularize modern exports="amd" -o ./modern/`
* Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/>
* Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE>
* Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
* Available under MIT license <http://lodash.com/license>
*/
define(['../functions/createCallback', '../utilities/identity'], function(createCallback, identity) {
/**
* Uses a binary search to determine the smallest index at which a value
* should be inserted into a given sorted array in order to maintain the sort
* order of the array. If a callback is provided it will be executed for
* `value` and each element of `array` to compute their sort ranking. The
* callback is bound to `thisArg` and invoked with one argument; (value).
*
* If a property name is provided for `callback` the created "_.pluck" style
* callback will return the property value of the given element.
*
* If an object is provided for `callback` the created "_.where" style callback
* will return `true` for elements that have the properties of the given object,
* else `false`.
*
* @static
* @memberOf _
* @category Arrays
* @param {Array} array The array to inspect.
* @param {*} value The value to evaluate.
* @param {Function|Object|string} [callback=identity] The function called
* per iteration. If a property name or object is provided it will be used
* to create a "_.pluck" or "_.where" style callback, respectively.
* @param {*} [thisArg] The `this` binding of `callback`.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedIndex([20, 30, 50], 40);
* // => 2
*
* // using "_.pluck" callback shorthand
* _.sortedIndex([{ 'x': 20 }, { 'x': 30 }, { 'x': 50 }], { 'x': 40 }, 'x');
* // => 2
*
* var dict = {
* 'wordToNumber': { 'twenty': 20, 'thirty': 30, 'fourty': 40, 'fifty': 50 }
* };
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return dict.wordToNumber[word];
* });
* // => 2
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return this.wordToNumber[word];
* }, dict);
* // => 2
*/
function sortedIndex(array, value, callback, thisArg) {
var low = 0,
high = array ? array.length : low;
// explicitly reference `identity` for better inlining in Firefox
callback = callback ? createCallback(callback, thisArg, 1) : identity;
value = callback(value);
while (low < high)
|
return low;
}
return sortedIndex;
});
|
{
var mid = (low + high) >>> 1;
(callback(array[mid]) < value)
? low = mid + 1
: high = mid;
}
|
conditional_block
|
sortedIndex.js
|
/**
* Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/>
* Build: `lodash modularize modern exports="amd" -o ./modern/`
* Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/>
* Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE>
* Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
* Available under MIT license <http://lodash.com/license>
*/
define(['../functions/createCallback', '../utilities/identity'], function(createCallback, identity) {
/**
* Uses a binary search to determine the smallest index at which a value
* should be inserted into a given sorted array in order to maintain the sort
* order of the array. If a callback is provided it will be executed for
* `value` and each element of `array` to compute their sort ranking. The
* callback is bound to `thisArg` and invoked with one argument; (value).
*
* If a property name is provided for `callback` the created "_.pluck" style
* callback will return the property value of the given element.
*
* If an object is provided for `callback` the created "_.where" style callback
* will return `true` for elements that have the properties of the given object,
* else `false`.
*
* @static
* @memberOf _
* @category Arrays
* @param {Array} array The array to inspect.
* @param {*} value The value to evaluate.
* @param {Function|Object|string} [callback=identity] The function called
* per iteration. If a property name or object is provided it will be used
* to create a "_.pluck" or "_.where" style callback, respectively.
* @param {*} [thisArg] The `this` binding of `callback`.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedIndex([20, 30, 50], 40);
* // => 2
*
* // using "_.pluck" callback shorthand
* _.sortedIndex([{ 'x': 20 }, { 'x': 30 }, { 'x': 50 }], { 'x': 40 }, 'x');
* // => 2
*
* var dict = {
* 'wordToNumber': { 'twenty': 20, 'thirty': 30, 'fourty': 40, 'fifty': 50 }
* };
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return dict.wordToNumber[word];
* });
* // => 2
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return this.wordToNumber[word];
* }, dict);
* // => 2
*/
function sortedIndex(array, value, callback, thisArg)
|
return sortedIndex;
});
|
{
var low = 0,
high = array ? array.length : low;
// explicitly reference `identity` for better inlining in Firefox
callback = callback ? createCallback(callback, thisArg, 1) : identity;
value = callback(value);
while (low < high) {
var mid = (low + high) >>> 1;
(callback(array[mid]) < value)
? low = mid + 1
: high = mid;
}
return low;
}
|
identifier_body
|
sortedIndex.js
|
/**
* Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/>
* Build: `lodash modularize modern exports="amd" -o ./modern/`
* Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/>
* Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE>
* Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
* Available under MIT license <http://lodash.com/license>
*/
define(['../functions/createCallback', '../utilities/identity'], function(createCallback, identity) {
/**
* Uses a binary search to determine the smallest index at which a value
* should be inserted into a given sorted array in order to maintain the sort
* order of the array. If a callback is provided it will be executed for
* `value` and each element of `array` to compute their sort ranking. The
* callback is bound to `thisArg` and invoked with one argument; (value).
*
* If a property name is provided for `callback` the created "_.pluck" style
* callback will return the property value of the given element.
*
* If an object is provided for `callback` the created "_.where" style callback
* will return `true` for elements that have the properties of the given object,
* else `false`.
*
* @static
* @memberOf _
* @category Arrays
* @param {Array} array The array to inspect.
* @param {*} value The value to evaluate.
* @param {Function|Object|string} [callback=identity] The function called
* per iteration. If a property name or object is provided it will be used
* to create a "_.pluck" or "_.where" style callback, respectively.
* @param {*} [thisArg] The `this` binding of `callback`.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedIndex([20, 30, 50], 40);
* // => 2
*
* // using "_.pluck" callback shorthand
* _.sortedIndex([{ 'x': 20 }, { 'x': 30 }, { 'x': 50 }], { 'x': 40 }, 'x');
* // => 2
*
* var dict = {
* 'wordToNumber': { 'twenty': 20, 'thirty': 30, 'fourty': 40, 'fifty': 50 }
* };
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return dict.wordToNumber[word];
* });
* // => 2
*
* _.sortedIndex(['twenty', 'thirty', 'fifty'], 'fourty', function(word) {
* return this.wordToNumber[word];
* }, dict);
* // => 2
*/
function
|
(array, value, callback, thisArg) {
var low = 0,
high = array ? array.length : low;
// explicitly reference `identity` for better inlining in Firefox
callback = callback ? createCallback(callback, thisArg, 1) : identity;
value = callback(value);
while (low < high) {
var mid = (low + high) >>> 1;
(callback(array[mid]) < value)
? low = mid + 1
: high = mid;
}
return low;
}
return sortedIndex;
});
|
sortedIndex
|
identifier_name
|
worddisplayvm.js
|
///<reference path="./otmword.ts" />
///<reference path="./wmmodules.ts" />
///<reference path="./wgenerator.ts" />
///<reference path="./ntdialog.ts" />
/**
* ViewModel used by the word-creation section
*/
class WordDisplayVM {
/**
* Constructor
* @param el id of the tag the bindings are applied to
* @param dict OTM-format dictionary class
* @param createSetting settings used to generate word strings
*/
constructor(el, dict, createSetting, equivalent) {
this.el = el;
this.data = {
dictionary: dict,
isDisabled: false,
createSetting: createSetting,
id: 1,
equivalent: equivalent,
};
this.initMethods();
}
/**
* Defines the methods used by this VM
*/
initMethods() {
this.methods = {
/**
* Creates a word string
*/
create: function _create() {
let form = "";
switch (this.createSetting.mode) {
case WordGenerator.SIMPLE_SYMBOL:
form = WordGenerator.simple(this.createSetting.simple);
break;
case WordGenerator.SIMPLECV_SYMBOL:
form = WordGenerator.simplecv(this.createSetting.simplecv);
break;
case WordGenerator.DEPENDENCYCV_SYMBOL:
form = WordGenerator.dependencycv(this.createSetting.dependencycv);
break;
default:
break;
}
let word = new OtmWord(this.id++, form);
word.add("");
this.dictionary.add(word);
},
/**
* Creates a word for every configured equivalent
*/
createAll: function _createAll() {
this.equivalent.equivalentsList.data.forEach((x) => {
let form = "";
switch (this.createSetting.mode) {
case WordGenerator.SIMPLE_SYMBOL:
form = WordGenerator.simple(this.createSetting.simple);
break;
case WordGenerator.SIMPLECV_SYMBOL:
form = WordGenerator.simplecv(this.createSetting.simplecv);
break;
case WordGenerator.DEPENDENCYCV_SYMBOL:
form = WordGenerator.dependencycv(this.createSetting.dependencycv);
break;
default:
break;
}
let word = new OtmWord(this.id++, form);
word.add(x.equivalents.join(","));
this.dictionary.add(word);
});
},
/**
* Removes all created words
*/
removeAll: function _removeAll() {
this.dictionary.removeAll();
// reset id to its initial value
this.id = 1;
|
/**
* Outputs the created word list in OTM-JSON format
*/
outputOtmJSON: function _outputOtmJSON() {
// renumber the ids
let id = 1;
this.dictionary.words.forEach((x) => {
x.entry.id = id++;
});
WMModules.exportJSON(this.dictionary, "dict.json");
// update id in case more words are created afterwards
this.id = id;
},
// parts used individually
/**
* Opens the equivalent-selection dialog
* @param word the word to set equivalents on
*/
showEquivalentDialog: function _showEquivalentDialog(word) {
this.equivalent.selectedWordId = word.entry.id.toString();
WMModules.equivalentDialog.show();
},
/**
* Removes a word
* @param word the word to remove
*/
remove: function _remove(word) {
this.dictionary.remove(word.entry.id);
},
/**
* Splits a string on the word separator "," into an array
* @param value the word's equivalents (comma-separated)
* @return array of strings produced by splitting on commas
*/
splitter: function _splitter(value) {
return value.split(",").map(function (x) { return x.trim(); });
},
};
}
}
//# sourceMappingURL=worddisplayvm.js.map
|
},
|
random_line_split
|
worddisplayvm.js
|
///<reference path="./otmword.ts" />
///<reference path="./wmmodules.ts" />
///<reference path="./wgenerator.ts" />
///<reference path="./ntdialog.ts" />
/**
* ViewModel used by the word-creation section
*/
class WordDisplayVM {
/**
* Constructor
* @param el id of the tag the bindings are applied to
* @param dict OTM-format dictionary class
* @param createSetting settings used to generate word strings
*/
constructor(el, dict, createSetting, equivalent) {
this.el = el;
this.data = {
dictionary: dict,
isDisabled: false,
createSetting: createSetting,
id: 1,
equivalent: equivalent,
};
this.initMethods();
}
/**
* Defines the methods used by this VM
*/
initMethods() {
this.methods = {
/**
* Creates a word string
*/
create: function _crea
|
let form = "";
switch (this.createSetting.mode) {
case WordGenerator.SIMPLE_SYMBOL:
form = WordGenerator.simple(this.createSetting.simple);
break;
case WordGenerator.SIMPLECV_SYMBOL:
form = WordGenerator.simplecv(this.createSetting.simplecv);
break;
case WordGenerator.DEPENDENCYCV_SYMBOL:
form = WordGenerator.dependencycv(this.createSetting.dependencycv);
break;
default:
break;
}
let word = new OtmWord(this.id++, form);
word.add("");
this.dictionary.add(word);
},
/**
* Creates a word for every configured equivalent
*/
createAll: function _createAll() {
this.equivalent.equivalentsList.data.forEach((x) => {
let form = "";
switch (this.createSetting.mode) {
case WordGenerator.SIMPLE_SYMBOL:
form = WordGenerator.simple(this.createSetting.simple);
break;
case WordGenerator.SIMPLECV_SYMBOL:
form = WordGenerator.simplecv(this.createSetting.simplecv);
break;
case WordGenerator.DEPENDENCYCV_SYMBOL:
form = WordGenerator.dependencycv(this.createSetting.dependencycv);
break;
default:
break;
}
let word = new OtmWord(this.id++, form);
word.add(x.equivalents.join(","));
this.dictionary.add(word);
});
},
/**
* Removes all created words
*/
removeAll: function _removeAll() {
this.dictionary.removeAll();
// reset id to its initial value
this.id = 1;
},
/**
* Outputs the created word list in OTM-JSON format
*/
outputOtmJSON: function _outputOtmJSON() {
// renumber the ids
let id = 1;
this.dictionary.words.forEach((x) => {
x.entry.id = id++;
});
WMModules.exportJSON(this.dictionary, "dict.json");
// update id in case more words are created afterwards
this.id = id;
},
// parts used individually
/**
* Opens the equivalent-selection dialog
* @param word the word to set equivalents on
*/
showEquivalentDialog: function _showEquivalentDialog(word) {
this.equivalent.selectedWordId = word.entry.id.toString();
WMModules.equivalentDialog.show();
},
/**
* Removes a word
* @param word the word to remove
*/
remove: function _remove(word) {
this.dictionary.remove(word.entry.id);
},
/**
* Splits a string on the word separator "," into an array
* @param value the word's equivalents (comma-separated)
* @return array of strings produced by splitting on commas
*/
splitter: function _splitter(value) {
return value.split(",").map(function (x) { return x.trim(); });
},
};
}
}
//# sourceMappingURL=worddisplayvm.js.map
|
te() {
|
identifier_name
|
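Both worddisplayvm.js rows center on the same `switch` over `createSetting.mode` that picks a word generator. The same dispatch reads naturally as a lookup table; a hedged Python sketch, where the generator functions are illustrative stand-ins for `WordGenerator.simple` / `simplecv` / `dependencycv`:

```python
# Stand-in generators; the real ones build word strings from settings.
GENERATORS = {
    "simple": lambda setting: "form-from-simple",
    "simplecv": lambda setting: "form-from-simplecv",
    "dependencycv": lambda setting: "form-from-dependencycv",
}

def create_form(mode, setting):
    """Dispatch-table equivalent of the switch: unknown modes yield ''."""
    generator = GENERATORS.get(mode)
    return generator(setting) if generator else ""
```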
test_plugin_slurm.py
|
# pylint: disable=missing-docstring
# this fails on Python 2.6 but Slurm environment is 2.7
import unittest
from datetime import datetime
from reporting.plugins.slurm import SlurmInput
class SlurmTestCase(unittest.TestCase):
"""Test cases for slurm module"""
def test_all_heros(self):
"""Slurm plugin: no user other than hero should be in test/sacct-with-start-end.txt"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
for job in data['jobs']:
self.assertTrue(job['user'].startswith('hero'))
def test_get_data(self):
"""Slurm plugin: get_data method should return a message in correct structure"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
self.assertIn('hostname', data)
self.assertIn('timestamp', data)
self.assertIn('jobs', data)
self.assertTrue(isinstance(data['jobs'], list))
job = data['jobs'][0]
for required_key in ('job_id', 'partition', 'user', 'start', 'end', 'cpu_seconds'):
self.assertIn(required_key, job)
def test_read_data(self):
"""Slurm plugin: _read_data should only return job summary not steps, those do not have User value"""
data = SlurmInput._read_data('tests/sacct-with-start-end.txt')
qualified_count = len(data)
for message in data:
if 'user' in message and len(message['user'].strip()):
qualified_count -= 1
self.assertEqual(qualified_count, 0)
def test_convert_to_timestamp(self):
|
"""Slurm plugin: _convert_to_timestamp should convert iso datetime to timestamp string correctly"""
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
reference = datetime.utcnow().strftime(ISO_FORMAT)
converted = datetime.utcfromtimestamp(
SlurmInput._convert_to_timestamp(reference)).strftime(ISO_FORMAT)
self.assertEqual(reference, converted)
|
identifier_body
|
|
test_plugin_slurm.py
|
# pylint: disable=missing-docstring
# this fails on Python 2.6 but Slurm environment is 2.7
import unittest
from datetime import datetime
from reporting.plugins.slurm import SlurmInput
class SlurmTestCase(unittest.TestCase):
"""Test cases for slurm module"""
def test_all_heros(self):
"""Slurm plugin: no user other than hero should be in test/sacct-with-start-end.txt"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
for job in data['jobs']:
self.assertTrue(job['user'].startswith('hero'))
def test_get_data(self):
"""Slurm plugin: get_data method should return a message in correct structure"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
self.assertIn('hostname', data)
self.assertIn('timestamp', data)
self.assertIn('jobs', data)
self.assertTrue(isinstance(data['jobs'], list))
job = data['jobs'][0]
for required_key in ('job_id', 'partition', 'user', 'start', 'end', 'cpu_seconds'):
self.assertIn(required_key, job)
def test_read_data(self):
"""Slurm plugin: _read_data should only return job summary not steps, those do not have User value"""
data = SlurmInput._read_data('tests/sacct-with-start-end.txt')
qualified_count = len(data)
for message in data:
|
self.assertEqual(qualified_count, 0)
def test_convert_to_timestamp(self):
"""Slurm plugin: _convert_to_timestamp should convert iso datetime to timestamp string correctly"""
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
reference = datetime.utcnow().strftime(ISO_FORMAT)
converted = datetime.utcfromtimestamp(
SlurmInput._convert_to_timestamp(reference)).strftime(ISO_FORMAT)
self.assertEqual(reference, converted)
|
if 'user' in message and len(message['user'].strip()):
qualified_count -= 1
|
conditional_block
|
test_plugin_slurm.py
|
# pylint: disable=missing-docstring
# this fails on Python 2.6 but Slurm environment is 2.7
import unittest
from datetime import datetime
from reporting.plugins.slurm import SlurmInput
class SlurmTestCase(unittest.TestCase):
"""Test cases for slurm module"""
def test_all_heros(self):
"""Slurm plugin: no user other than hero should be in test/sacct-with-start-end.txt"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
for job in data['jobs']:
self.assertTrue(job['user'].startswith('hero'))
def test_get_data(self):
"""Slurm plugin: get_data method should return a message in correct structure"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
self.assertIn('hostname', data)
self.assertIn('timestamp', data)
self.assertIn('jobs', data)
self.assertTrue(isinstance(data['jobs'], list))
job = data['jobs'][0]
for required_key in ('job_id', 'partition', 'user', 'start', 'end', 'cpu_seconds'):
self.assertIn(required_key, job)
def
|
(self):
"""Slurm plugin: _read_data should only return job summary not steps, those do not have User value"""
data = SlurmInput._read_data('tests/sacct-with-start-end.txt')
qualified_count = len(data)
for message in data:
if 'user' in message and len(message['user'].strip()):
qualified_count -= 1
self.assertEqual(qualified_count, 0)
def test_convert_to_timestamp(self):
"""Slurm plugin: _convert_to_timestamp should convert iso datetime to timestamp string correctly"""
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
reference = datetime.utcnow().strftime(ISO_FORMAT)
converted = datetime.utcfromtimestamp(
SlurmInput._convert_to_timestamp(reference)).strftime(ISO_FORMAT)
self.assertEqual(reference, converted)
|
test_read_data
|
identifier_name
|
test_plugin_slurm.py
|
# pylint: disable=missing-docstring
# this fails on Python 2.6 but Slurm environment is 2.7
import unittest
from datetime import datetime
from reporting.plugins.slurm import SlurmInput
class SlurmTestCase(unittest.TestCase):
"""Test cases for slurm module"""
def test_all_heros(self):
"""Slurm plugin: no user other than hero should be in test/sacct-with-start-end.txt"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
for job in data['jobs']:
self.assertTrue(job['user'].startswith('hero'))
def test_get_data(self):
"""Slurm plugin: get_data method should return a message in correct structure"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
|
self.assertTrue(isinstance(data['jobs'], list))
job = data['jobs'][0]
for required_key in ('job_id', 'partition', 'user', 'start', 'end', 'cpu_seconds'):
self.assertIn(required_key, job)
def test_read_data(self):
"""Slurm plugin: _read_data should only return job summary not steps, those do not have User value"""
data = SlurmInput._read_data('tests/sacct-with-start-end.txt')
qualified_count = len(data)
for message in data:
if 'user' in message and len(message['user'].strip()):
qualified_count -= 1
self.assertEqual(qualified_count, 0)
def test_convert_to_timestamp(self):
"""Slurm plugin: _convert_to_timestamp should convert iso datetime to timestamp string correctly"""
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
reference = datetime.utcnow().strftime(ISO_FORMAT)
converted = datetime.utcfromtimestamp(
SlurmInput._convert_to_timestamp(reference)).strftime(ISO_FORMAT)
self.assertEqual(reference, converted)
|
data = slurm_input.get_data()
self.assertIn('hostname', data)
self.assertIn('timestamp', data)
self.assertIn('jobs', data)
|
random_line_split
|
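`test_convert_to_timestamp` in these rows round-trips a naive ISO-8601 string through `_convert_to_timestamp` and back via `datetime.utcfromtimestamp`. The plugin's real implementation is not included here, but one way to satisfy that round-trip is to parse the string as UTC, as in this sketch:

```python
import calendar
import time
from datetime import datetime

ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'

def convert_to_timestamp(iso_string):
    """Parse a naive ISO-8601 string as UTC and return Unix seconds."""
    return calendar.timegm(time.strptime(iso_string, ISO_FORMAT))

reference = datetime.utcnow().strftime(ISO_FORMAT)
converted = datetime.utcfromtimestamp(
    convert_to_timestamp(reference)).strftime(ISO_FORMAT)
assert converted == reference
```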
caching-interceptor.ts
|
import { Injectable } from '@angular/core';
import {
HttpEvent, HttpHeaders, HttpRequest, HttpResponse,
HttpInterceptor, HttpHandler
} from '@angular/common/http';
import { Observable, of } from 'rxjs';
import { startWith, tap } from 'rxjs/operators';
import { RequestCache } from '../request-cache.service';
import { searchUrl } from '../package-search/package-search.service';
/**
* If request is cachable (e.g., package search) and
* response is in cache return the cached response as observable.
* If has 'x-refresh' header that is true,
* then also re-run the package search, using response from next(),
* returning an observable that emits the cached response first.
*
* If not in cache or not cachable,
* pass request through to next()
*/
@Injectable()
export class CachingInterceptor implements HttpInterceptor {
constructor(private cache: RequestCache)
|
intercept(req: HttpRequest<any>, next: HttpHandler) {
// continue if not cachable.
if (!isCachable(req)) { return next.handle(req); }
const cachedResponse = this.cache.get(req);
// cache-then-refresh
if (req.headers.get('x-refresh')) {
const results$ = sendRequest(req, next, this.cache);
return cachedResponse ?
results$.pipe( startWith(cachedResponse) ) :
results$;
}
// cache-or-fetch
return cachedResponse ?
of(cachedResponse) : sendRequest(req, next, this.cache);
}
}
/** Is this request cachable? */
function isCachable(req: HttpRequest<any>) {
// Only GET requests are cachable
return req.method === 'GET' &&
// Only npm package search is cachable in this app
-1 < req.url.indexOf(searchUrl);
}
/**
* Get server response observable by sending request to `next()`.
* Will add the response to the cache on the way out.
*/
function sendRequest(
req: HttpRequest<any>,
next: HttpHandler,
cache: RequestCache): Observable<HttpEvent<any>> {
// No headers allowed in npm search request
const noHeaderReq = req.clone({ headers: new HttpHeaders() });
return next.handle(noHeaderReq).pipe(
tap(event => {
// There may be other events besides the response.
if (event instanceof HttpResponse) {
cache.put(req, event); // Update the cache.
}
})
);
}
|
{}
|
identifier_body
|
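The interceptor comment above describes two paths: cache-or-fetch for plain GETs and cache-then-refresh when an `x-refresh` header is set. Stripped of the Angular observable machinery, the decision tree looks like this sketch, where `fetch`, the dict cache, and the request shape are illustrative stand-ins rather than the service's real API:

```python
def handle(request, cache, fetch):
    """Return the list of responses the caller would observe, in order."""
    if request["method"] != "GET":
        return [fetch(request)]  # not cachable: pass straight through
    key = request["url"]
    cached = cache.get(key)
    if request["headers"].get("x-refresh"):
        # cache-then-refresh: emit the cached copy first, then the fresh one
        fresh = fetch(request)
        cache[key] = fresh
        return [cached, fresh] if cached is not None else [fresh]
    if cached is not None:
        return [cached]  # cache-or-fetch: serve from cache
    fresh = fetch(request)
    cache[key] = fresh
    return [fresh]
```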
caching-interceptor.ts
|
import { Injectable } from '@angular/core';
import {
HttpEvent, HttpHeaders, HttpRequest, HttpResponse,
HttpInterceptor, HttpHandler
} from '@angular/common/http';
import { Observable, of } from 'rxjs';
import { startWith, tap } from 'rxjs/operators';
import { RequestCache } from '../request-cache.service';
import { searchUrl } from '../package-search/package-search.service';
/**
* If request is cachable (e.g., package search) and
* response is in cache return the cached response as observable.
* If has 'x-refresh' header that is true,
* then also re-run the package search, using response from next(),
* returning an observable that emits the cached response first.
*
* If not in cache or not cachable,
* pass request through to next()
*/
@Injectable()
export class CachingInterceptor implements HttpInterceptor {
constructor(private cache: RequestCache) {}
intercept(req: HttpRequest<any>, next: HttpHandler) {
// continue if not cachable.
if (!isCachable(req)) { return next.handle(req); }
const cachedResponse = this.cache.get(req);
// cache-then-refresh
if (req.headers.get('x-refresh')) {
const results$ = sendRequest(req, next, this.cache);
return cachedResponse ?
results$.pipe( startWith(cachedResponse) ) :
results$;
}
// cache-or-fetch
return cachedResponse ?
of(cachedResponse) : sendRequest(req, next, this.cache);
}
}
/** Is this request cachable? */
function isCachable(req: HttpRequest<any>) {
// Only GET requests are cachable
return req.method === 'GET' &&
// Only npm package search is cachable in this app
-1 < req.url.indexOf(searchUrl);
}
/**
* Get server response observable by sending request to `next()`.
* Will add the response to the cache on the way out.
*/
function sendRequest(
req: HttpRequest<any>,
next: HttpHandler,
|
return next.handle(noHeaderReq).pipe(
tap(event => {
// There may be other events besides the response.
if (event instanceof HttpResponse) {
cache.put(req, event); // Update the cache.
}
})
);
}
|
cache: RequestCache): Observable<HttpEvent<any>> {
// No headers allowed in npm search request
const noHeaderReq = req.clone({ headers: new HttpHeaders() });
|
random_line_split
|
caching-interceptor.ts
|
import { Injectable } from '@angular/core';
import {
HttpEvent, HttpHeaders, HttpRequest, HttpResponse,
HttpInterceptor, HttpHandler
} from '@angular/common/http';
import { Observable, of } from 'rxjs';
import { startWith, tap } from 'rxjs/operators';
import { RequestCache } from '../request-cache.service';
import { searchUrl } from '../package-search/package-search.service';
/**
* If request is cachable (e.g., package search) and
* response is in cache return the cached response as observable.
* If has 'x-refresh' header that is true,
* then also re-run the package search, using response from next(),
* returning an observable that emits the cached response first.
*
* If not in cache or not cachable,
* pass request through to next()
*/
@Injectable()
export class CachingInterceptor implements HttpInterceptor {
constructor(private cache: RequestCache) {}
intercept(req: HttpRequest<any>, next: HttpHandler) {
// continue if not cachable.
if (!isCachable(req)) { return next.handle(req); }
const cachedResponse = this.cache.get(req);
// cache-then-refresh
if (req.headers.get('x-refresh')) {
const results$ = sendRequest(req, next, this.cache);
return cachedResponse ?
results$.pipe( startWith(cachedResponse) ) :
results$;
}
// cache-or-fetch
return cachedResponse ?
of(cachedResponse) : sendRequest(req, next, this.cache);
}
}
/** Is this request cachable? */
function isCachable(req: HttpRequest<any>) {
// Only GET requests are cachable
return req.method === 'GET' &&
// Only npm package search is cachable in this app
-1 < req.url.indexOf(searchUrl);
}
/**
* Get server response observable by sending request to `next()`.
* Will add the response to the cache on the way out.
*/
function
|
(
req: HttpRequest<any>,
next: HttpHandler,
cache: RequestCache): Observable<HttpEvent<any>> {
// No headers allowed in npm search request
const noHeaderReq = req.clone({ headers: new HttpHeaders() });
return next.handle(noHeaderReq).pipe(
tap(event => {
// There may be other events besides the response.
if (event instanceof HttpResponse) {
cache.put(req, event); // Update the cache.
}
})
);
}
|
sendRequest
|
identifier_name
|
caching-interceptor.ts
|
import { Injectable } from '@angular/core';
import {
HttpEvent, HttpHeaders, HttpRequest, HttpResponse,
HttpInterceptor, HttpHandler
} from '@angular/common/http';
import { Observable, of } from 'rxjs';
import { startWith, tap } from 'rxjs/operators';
import { RequestCache } from '../request-cache.service';
import { searchUrl } from '../package-search/package-search.service';
/**
* If request is cachable (e.g., package search) and
* response is in cache return the cached response as observable.
* If has 'x-refresh' header that is true,
* then also re-run the package search, using response from next(),
* returning an observable that emits the cached response first.
*
* If not in cache or not cachable,
* pass request through to next()
*/
@Injectable()
export class CachingInterceptor implements HttpInterceptor {
constructor(private cache: RequestCache) {}
intercept(req: HttpRequest<any>, next: HttpHandler) {
// continue if not cachable.
if (!isCachable(req)) { return next.handle(req); }
const cachedResponse = this.cache.get(req);
// cache-then-refresh
if (req.headers.get('x-refresh'))
|
// cache-or-fetch
return cachedResponse ?
of(cachedResponse) : sendRequest(req, next, this.cache);
}
}
/** Is this request cachable? */
function isCachable(req: HttpRequest<any>) {
// Only GET requests are cachable
return req.method === 'GET' &&
// Only npm package search is cachable in this app
-1 < req.url.indexOf(searchUrl);
}
/**
* Get server response observable by sending request to `next()`.
* Will add the response to the cache on the way out.
*/
function sendRequest(
req: HttpRequest<any>,
next: HttpHandler,
cache: RequestCache): Observable<HttpEvent<any>> {
// No headers allowed in npm search request
const noHeaderReq = req.clone({ headers: new HttpHeaders() });
return next.handle(noHeaderReq).pipe(
tap(event => {
// There may be other events besides the response.
if (event instanceof HttpResponse) {
cache.put(req, event); // Update the cache.
}
})
);
}
|
{
const results$ = sendRequest(req, next, this.cache);
return cachedResponse ?
results$.pipe( startWith(cachedResponse) ) :
results$;
}
|
conditional_block
|
regex.rs
|
// Copyright (c) 2018 The predicates-rs Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt;
use crate::reflection;
use crate::utils;
use crate::Predicate;
/// An error that occurred during parsing or compiling a regular expression.
pub type RegexError = regex::Error;
/// Predicate that uses regex matching
///
/// This is created by the `predicate::str::is_match`.
#[derive(Debug, Clone)]
pub struct RegexPredicate {
re: regex::Regex,
}
impl RegexPredicate {
/// Require a specific count of matches.
///
/// # Examples
///
/// ```
/// use predicates::prelude::*;
///
/// let predicate_fn = predicate::str::is_match("T[a-z]*").unwrap().count(3);
/// assert_eq!(true, predicate_fn.eval("One Two Three Two One"));
/// assert_eq!(false, predicate_fn.eval("One Two Three"));
/// ```
pub fn count(self, count: usize) -> RegexMatchesPredicate {
RegexMatchesPredicate { re: self.re, count }
}
}
impl Predicate<str> for RegexPredicate {
fn eval(&self, variable: &str) -> bool {
self.re.is_match(variable)
}
fn find_case<'a>(&'a self, expected: bool, variable: &str) -> Option<reflection::Case<'a>> {
utils::default_find_case(self, expected, variable)
.map(|case| case.add_product(reflection::Product::new("var", variable.to_owned())))
}
}
impl reflection::PredicateReflection for RegexPredicate {}
impl fmt::Display for RegexPredicate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let palette = crate::Palette::current();
write!(
f,
"{}.{}({})",
palette.var.paint("var"),
palette.description.paint("is_match"),
palette.expected.paint(&self.re),
)
}
}
/// Predicate that checks for repeated patterns.
///
/// This is created by `predicates::str::is_match(...).count`.
#[derive(Debug, Clone)]
pub struct RegexMatchesPredicate {
re: regex::Regex,
count: usize,
}
impl Predicate<str> for RegexMatchesPredicate {
fn eval(&self, variable: &str) -> bool {
self.re.find_iter(variable).count() == self.count
}
fn find_case<'a>(&'a self, expected: bool, variable: &str) -> Option<reflection::Case<'a>> {
let actual_count = self.re.find_iter(variable).count();
let result = self.count == actual_count;
if result == expected {
Some(
reflection::Case::new(Some(self), result)
.add_product(reflection::Product::new("var", variable.to_owned()))
.add_product(reflection::Product::new("actual count", actual_count)),
)
} else
|
}
}
impl reflection::PredicateReflection for RegexMatchesPredicate {
fn parameters<'a>(&'a self) -> Box<dyn Iterator<Item = reflection::Parameter<'a>> + 'a> {
let params = vec![reflection::Parameter::new("count", &self.count)];
Box::new(params.into_iter())
}
}
impl fmt::Display for RegexMatchesPredicate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let palette = crate::Palette::current();
write!(
f,
"{}.{}({})",
palette.var.paint("var"),
palette.description.paint("is_match"),
palette.expected.paint(&self.re),
)
}
}
/// Creates a new `Predicate` that uses a regular expression to match the string.
///
/// # Examples
///
/// ```
/// use predicates::prelude::*;
///
/// let predicate_fn = predicate::str::is_match("^Hello.*$").unwrap();
/// assert_eq!(true, predicate_fn.eval("Hello World"));
/// assert_eq!(false, predicate_fn.eval("Food World"));
/// ```
pub fn is_match<S>(pattern: S) -> Result<RegexPredicate, RegexError>
where
S: AsRef<str>,
{
regex::Regex::new(pattern.as_ref()).map(|re| RegexPredicate { re })
}
|
{
None
}
|
conditional_block
|
regex.rs
|
// Copyright (c) 2018 The predicates-rs Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt;
use crate::reflection;
use crate::utils;
use crate::Predicate;
/// An error that occurred during parsing or compiling a regular expression.
pub type RegexError = regex::Error;
/// Predicate that uses regex matching
///
/// This is created by the `predicate::str::is_match`.
#[derive(Debug, Clone)]
pub struct RegexPredicate {
re: regex::Regex,
}
impl RegexPredicate {
/// Require a specific count of matches.
///
/// # Examples
///
/// ```
/// use predicates::prelude::*;
///
/// let predicate_fn = predicate::str::is_match("T[a-z]*").unwrap().count(3);
/// assert_eq!(true, predicate_fn.eval("One Two Three Two One"));
/// assert_eq!(false, predicate_fn.eval("One Two Three"));
/// ```
pub fn count(self, count: usize) -> RegexMatchesPredicate {
RegexMatchesPredicate { re: self.re, count }
}
}
impl Predicate<str> for RegexPredicate {
fn eval(&self, variable: &str) -> bool {
self.re.is_match(variable)
}
fn find_case<'a>(&'a self, expected: bool, variable: &str) -> Option<reflection::Case<'a>> {
utils::default_find_case(self, expected, variable)
.map(|case| case.add_product(reflection::Product::new("var", variable.to_owned())))
}
}
impl reflection::PredicateReflection for RegexPredicate {}
impl fmt::Display for RegexPredicate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let palette = crate::Palette::current();
write!(
f,
"{}.{}({})",
palette.var.paint("var"),
palette.description.paint("is_match"),
palette.expected.paint(&self.re),
)
}
}
/// Predicate that checks for repeated patterns.
///
/// This is created by `predicates::str::is_match(...).count`.
#[derive(Debug, Clone)]
pub struct RegexMatchesPredicate {
re: regex::Regex,
count: usize,
}
impl Predicate<str> for RegexMatchesPredicate {
fn eval(&self, variable: &str) -> bool {
self.re.find_iter(variable).count() == self.count
}
fn find_case<'a>(&'a self, expected: bool, variable: &str) -> Option<reflection::Case<'a>> {
let actual_count = self.re.find_iter(variable).count();
let result = self.count == actual_count;
if result == expected {
Some(
reflection::Case::new(Some(self), result)
.add_product(reflection::Product::new("var", variable.to_owned()))
.add_product(reflection::Product::new("actual count", actual_count)),
)
} else {
None
}
}
}
impl reflection::PredicateReflection for RegexMatchesPredicate {
fn parameters<'a>(&'a self) -> Box<dyn Iterator<Item = reflection::Parameter<'a>> + 'a> {
let params = vec![reflection::Parameter::new("count", &self.count)];
Box::new(params.into_iter())
}
}
impl fmt::Display for RegexMatchesPredicate {
fn
|
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let palette = crate::Palette::current();
write!(
f,
"{}.{}({})",
palette.var.paint("var"),
palette.description.paint("is_match"),
palette.expected.paint(&self.re),
)
}
}
/// Creates a new `Predicate` that uses a regular expression to match the string.
///
/// # Examples
///
/// ```
/// use predicates::prelude::*;
///
/// let predicate_fn = predicate::str::is_match("^Hello.*$").unwrap();
/// assert_eq!(true, predicate_fn.eval("Hello World"));
/// assert_eq!(false, predicate_fn.eval("Food World"));
/// ```
pub fn is_match<S>(pattern: S) -> Result<RegexPredicate, RegexError>
where
S: AsRef<str>,
{
regex::Regex::new(pattern.as_ref()).map(|re| RegexPredicate { re })
}
|
fmt
|
identifier_name
|
regex.rs
|
// Copyright (c) 2018 The predicates-rs Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
use crate::utils;
use crate::Predicate;
/// An error that occurred during parsing or compiling a regular expression.
pub type RegexError = regex::Error;
/// Predicate that uses regex matching
///
/// This is created by the `predicate::str::is_match`.
#[derive(Debug, Clone)]
pub struct RegexPredicate {
re: regex::Regex,
}
impl RegexPredicate {
/// Require a specific count of matches.
///
/// # Examples
///
/// ```
/// use predicates::prelude::*;
///
/// let predicate_fn = predicate::str::is_match("T[a-z]*").unwrap().count(3);
/// assert_eq!(true, predicate_fn.eval("One Two Three Two One"));
/// assert_eq!(false, predicate_fn.eval("One Two Three"));
/// ```
pub fn count(self, count: usize) -> RegexMatchesPredicate {
RegexMatchesPredicate { re: self.re, count }
}
}
impl Predicate<str> for RegexPredicate {
fn eval(&self, variable: &str) -> bool {
self.re.is_match(variable)
}
fn find_case<'a>(&'a self, expected: bool, variable: &str) -> Option<reflection::Case<'a>> {
utils::default_find_case(self, expected, variable)
.map(|case| case.add_product(reflection::Product::new("var", variable.to_owned())))
}
}
impl reflection::PredicateReflection for RegexPredicate {}
impl fmt::Display for RegexPredicate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let palette = crate::Palette::current();
write!(
f,
"{}.{}({})",
palette.var.paint("var"),
palette.description.paint("is_match"),
palette.expected.paint(&self.re),
)
}
}
/// Predicate that checks for repeated patterns.
///
/// This is created by `predicates::str::is_match(...).count`.
#[derive(Debug, Clone)]
pub struct RegexMatchesPredicate {
re: regex::Regex,
count: usize,
}
impl Predicate<str> for RegexMatchesPredicate {
fn eval(&self, variable: &str) -> bool {
self.re.find_iter(variable).count() == self.count
}
fn find_case<'a>(&'a self, expected: bool, variable: &str) -> Option<reflection::Case<'a>> {
let actual_count = self.re.find_iter(variable).count();
let result = self.count == actual_count;
if result == expected {
Some(
reflection::Case::new(Some(self), result)
.add_product(reflection::Product::new("var", variable.to_owned()))
.add_product(reflection::Product::new("actual count", actual_count)),
)
} else {
None
}
}
}
impl reflection::PredicateReflection for RegexMatchesPredicate {
fn parameters<'a>(&'a self) -> Box<dyn Iterator<Item = reflection::Parameter<'a>> + 'a> {
let params = vec![reflection::Parameter::new("count", &self.count)];
Box::new(params.into_iter())
}
}
impl fmt::Display for RegexMatchesPredicate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let palette = crate::Palette::current();
write!(
f,
"{}.{}({})",
palette.var.paint("var"),
palette.description.paint("is_match"),
palette.expected.paint(&self.re),
)
}
}
/// Creates a new `Predicate` that uses a regular expression to match the string.
///
/// # Examples
///
/// ```
/// use predicates::prelude::*;
///
/// let predicate_fn = predicate::str::is_match("^Hello.*$").unwrap();
/// assert_eq!(true, predicate_fn.eval("Hello World"));
/// assert_eq!(false, predicate_fn.eval("Food World"));
/// ```
pub fn is_match<S>(pattern: S) -> Result<RegexPredicate, RegexError>
where
S: AsRef<str>,
{
regex::Regex::new(pattern.as_ref()).map(|re| RegexPredicate { re })
}
|
// except according to those terms.
use std::fmt;
use crate::reflection;
|
random_line_split
|
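All three regex.rs records above center on RegexMatchesPredicate, whose eval succeeds only when the number of non-overlapping matches equals the expected count. The same semantics can be cross-checked in a few lines of Python, with re.finditer standing in for regex::Regex::find_iter:

import re

# Mirrors RegexMatchesPredicate::eval: true only when the number of
# non-overlapping matches equals the expected count.
def matches_count(pattern, text, count):
    return sum(1 for _ in re.finditer(pattern, text)) == count

assert matches_count(r"T[a-z]*", "One Two Three Two One", 3)
assert not matches_count(r"T[a-z]*", "One Two Three", 3)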
interfaces.py
|
from zope import component
from zope import interface
from zope.interface.interfaces import IObjectEvent
from zope import location
from sparc.configuration.container import ISparcPyContainerConfiguredApplication
#EVENTS
class ISnippetAvailableForSecretsSniffEvent(IObjectEvent):
"""An object providing ISnippet is ready to be sniffed for secrets"""
#APPLICATION & FACTORIES
class IMellonApplication(ISparcPyContainerConfiguredApplication):
"""The Application"""
class IMellonFileProvider(interface.Interface):
"""Provides IFile objects that should be processed by the application"""
def __iter__():
"""Iterator of IFile objects"""
class IMellonFileProviderFactory(component.IFactory):
"""A factory producing a IMellonFileProvider"""
def __call__(config):
"""
Args:
config; factory specific data structure holding required object
initialization information needed by factory
"""
#SNIPPETS
class ISnippet(location.ILocation):
"""A snippet of data to be sniffed for secrets
This also implements ILocation, where __parent__ is a IMellonFile and
__name__ indicates where in the file the snippet can be located at.
"""
data = interface.Attribute("A Python data sequence")
class IBytesSnippet(ISnippet):
"""A snippet of bytes data to be sniffed for secrets"""
data = interface.Attribute("A Python bytes sequence")
class IUnicodeSnippet(ISnippet):
"""A snippet of unicode data to be sniffed for secrets"""
data = interface.Attribute("A Python unicode sequence")
class ISnippetIterator(interface.Interface):
"""Iterates data snippets"""
def __iter__():
"""Iterator of ISnippet objects"""
#FILES
class IPath(interface.Interface):
"""Marker for text that is a formatted file system path"""
class IFile(interface.Interface):
"""Marker for file-like object providing Python's file object interface"""
class IMellonFile(ISnippetIterator):
"""A file to be processed by the application"""
def __str__():
"""String locatable identity of file"""
class IUnicodeMellonFile(IMellonFile):
"""A Unicode (text) file to be processed by the application"""
snippet_lines_increment = \
interface.Attribute("Number of lines to jump after each snippet, 0 "+
"indicates entire data.")
snippet_lines_coverage = \
interface.Attribute("Number of lines to include in each snippet "+
"if available, 0 indicates all remaining lines.")
class IByteMellonFile(IMellonFile):
"""A byte (binary) file to be processed by the application"""
    read_size = interface.Attribute(\
        "Max number of bytes to include in each file read operation.")
snippet_bytes_increment = \
interface.Attribute("Number of read_size data packets to jump after "+
"snippet return.")
snippet_bytes_coverage = \
interface.Attribute("Number of read_size data packets to include in "+
"each snippet. 0 indicates all data packets.")
class IBinaryChecker(interface.Interface):
"""Binary file checker"""
def check():
"""True indicates the data was found to be binary"""
# SNIFFERS, SECRETS, WHITELISTS
class ISecretSniffer(interface.Interface):
"""Looks for a secret"""
def __iter__():
"""Iterator of found ISecret providers"""
class ISecret(location.ILocation):
"""A secret found within a ISnippet
This also implements ILocation, where __parent__ is a ISnippet and
__name__ is alias for __str__.
"""
def __str__():
|
def __hash__():
"""Uniquely identifies the locatable secret among other secrets"""
class IWhitelistInfo(interface.Interface):
"""Object whitelist information"""
def __str__():
"""Detailed information on how object was whitelisted"""
class IWhitelist(interface.Interface):
"""Identifies if object is whitelisted"""
def __iter__():
"""Iterator of found IWhitelistInfo providers"""
|
"""String details of the secret and/or how it was found"""
|
identifier_body
|
interfaces.py
|
from zope import component
from zope import interface
from zope.interface.interfaces import IObjectEvent
from zope import location
from sparc.configuration.container import ISparcPyContainerConfiguredApplication
#EVENTS
class ISnippetAvailableForSecretsSniffEvent(IObjectEvent):
"""An object providing ISnippet is ready to be sniffed for secrets"""
#APPLICATION & FACTORIES
class IMellonApplication(ISparcPyContainerConfiguredApplication):
"""The Application"""
class IMellonFileProvider(interface.Interface):
"""Provides IFile objects that should be processed by the application"""
def __iter__():
"""Iterator of IFile objects"""
class IMellonFileProviderFactory(component.IFactory):
"""A factory producing a IMellonFileProvider"""
def __call__(config):
"""
Args:
config; factory specific data structure holding required object
initialization information needed by factory
"""
#SNIPPETS
class ISnippet(location.ILocation):
"""A snippet of data to be sniffed for secrets
This also implements ILocation, where __parent__ is a IMellonFile and
__name__ indicates where in the file the snippet can be located at.
"""
data = interface.Attribute("A Python data sequence")
class IBytesSnippet(ISnippet):
"""A snippet of bytes data to be sniffed for secrets"""
data = interface.Attribute("A Python bytes sequence")
class IUnicodeSnippet(ISnippet):
"""A snippet of unicode data to be sniffed for secrets"""
data = interface.Attribute("A Python unicode sequence")
class ISnippetIterator(interface.Interface):
"""Iterates data snippets"""
def __iter__():
"""Iterator of ISnippet objects"""
#FILES
class IPath(interface.Interface):
"""Marker for text that is a formatted file system path"""
class IFile(interface.Interface):
"""Marker for file-like object providing Python's file object interface"""
class IMellonFile(ISnippetIterator):
"""A file to be processed by the application"""
def __str__():
"""String locatable identity of file"""
class IUnicodeMellonFile(IMellonFile):
"""A Unicode (text) file to be processed by the application"""
snippet_lines_increment = \
|
snippet_lines_coverage = \
interface.Attribute("Number of lines to include in each snippet "+
"if available, 0 indicates all remaining lines.")
class IByteMellonFile(IMellonFile):
"""A byte (binary) file to be processed by the application"""
    read_size = interface.Attribute(\
        "Max number of bytes to include in each file read operation.")
snippet_bytes_increment = \
interface.Attribute("Number of read_size data packets to jump after "+
"snippet return.")
snippet_bytes_coverage = \
interface.Attribute("Number of read_size data packets to include in "+
"each snippet. 0 indicates all data packets.")
class IBinaryChecker(interface.Interface):
"""Binary file checker"""
def check():
"""True indicates the data was found to be binary"""
# SNIFFERS, SECRETS, WHITELISTS
class ISecretSniffer(interface.Interface):
"""Looks for a secret"""
def __iter__():
"""Iterator of found ISecret providers"""
class ISecret(location.ILocation):
"""A secret found within a ISnippet
This also implements ILocation, where __parent__ is a ISnippet and
__name__ is alias for __str__.
"""
def __str__():
"""String details of the secret and/or how it was found"""
def __hash__():
"""Uniquely identifies the locatable secret among other secrets"""
class IWhitelistInfo(interface.Interface):
"""Object whitelist information"""
def __str__():
"""Detailed information on how object was whitelisted"""
class IWhitelist(interface.Interface):
"""Identifies if object is whitelisted"""
def __iter__():
"""Iterator of found IWhitelistInfo providers"""
|
interface.Attribute("Number of lines to jump after each snippet, 0 "+
"indicates entire data.")
|
random_line_split
|
interfaces.py
|
from zope import component
from zope import interface
from zope.interface.interfaces import IObjectEvent
from zope import location
from sparc.configuration.container import ISparcPyContainerConfiguredApplication
#EVENTS
class ISnippetAvailableForSecretsSniffEvent(IObjectEvent):
"""An object providing ISnippet is ready to be sniffed for secrets"""
#APPLICATION & FACTORIES
class IMellonApplication(ISparcPyContainerConfiguredApplication):
"""The Application"""
class IMellonFileProvider(interface.Interface):
"""Provides IFile objects that should be processed by the application"""
def __iter__():
"""Iterator of IFile objects"""
class IMellonFileProviderFactory(component.IFactory):
"""A factory producing a IMellonFileProvider"""
def __call__(config):
"""
Args:
config; factory specific data structure holding required object
initialization information needed by factory
"""
#SNIPPETS
class ISnippet(location.ILocation):
"""A snippet of data to be sniffed for secrets
This also implements ILocation, where __parent__ is a IMellonFile and
__name__ indicates where in the file the snippet can be located at.
"""
data = interface.Attribute("A Python data sequence")
class IBytesSnippet(ISnippet):
"""A snippet of bytes data to be sniffed for secrets"""
data = interface.Attribute("A Python bytes sequence")
class IUnicodeSnippet(ISnippet):
"""A snippet of unicode data to be sniffed for secrets"""
data = interface.Attribute("A Python unicode sequence")
class ISnippetIterator(interface.Interface):
"""Iterates data snippets"""
def __iter__():
"""Iterator of ISnippet objects"""
#FILES
class IPath(interface.Interface):
"""Marker for text that is a formatted file system path"""
class IFile(interface.Interface):
"""Marker for file-like object providing Python's file object interface"""
class IMellonFile(ISnippetIterator):
"""A file to be processed by the application"""
def
|
():
"""String locatable identity of file"""
class IUnicodeMellonFile(IMellonFile):
"""A Unicode (text) file to be processed by the application"""
snippet_lines_increment = \
interface.Attribute("Number of lines to jump after each snippet, 0 "+
"indicates entire data.")
snippet_lines_coverage = \
interface.Attribute("Number of lines to include in each snippet "+
"if available, 0 indicates all remaining lines.")
class IByteMellonFile(IMellonFile):
"""A byte (binary) file to be processed by the application"""
    read_size = interface.Attribute(\
        "Max number of bytes to include in each file read operation.")
snippet_bytes_increment = \
interface.Attribute("Number of read_size data packets to jump after "+
"snippet return.")
snippet_bytes_coverage = \
interface.Attribute("Number of read_size data packets to include in "+
"each snippet. 0 indicates all data packets.")
class IBinaryChecker(interface.Interface):
"""Binary file checker"""
def check():
"""True indicates the data was found to be binary"""
# SNIFFERS, SECRETS, WHITELISTS
class ISecretSniffer(interface.Interface):
"""Looks for a secret"""
def __iter__():
"""Iterator of found ISecret providers"""
class ISecret(location.ILocation):
"""A secret found within a ISnippet
This also implements ILocation, where __parent__ is a ISnippet and
__name__ is alias for __str__.
"""
def __str__():
"""String details of the secret and/or how it was found"""
def __hash__():
"""Uniquely identifies the locatable secret among other secrets"""
class IWhitelistInfo(interface.Interface):
"""Object whitelist information"""
def __str__():
"""Detailed information on how object was whitelisted"""
class IWhitelist(interface.Interface):
"""Identifies if object is whitelisted"""
def __iter__():
"""Iterator of found IWhitelistInfo providers"""
|
__str__
|
identifier_name
|
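The interfaces.py records above declare zope.interface contracts. A self-contained sketch of what a provider of the ISecret contract might look like, assuming zope.interface is installed; ISecret is redeclared locally so the snippet runs on its own, and StaticSecret and its values are hypothetical:

from zope import interface

class ISecret(interface.Interface):
    def __str__():
        """String details of the secret and/or how it was found"""
    def __hash__():
        """Uniquely identifies the locatable secret among other secrets"""

@interface.implementer(ISecret)
class StaticSecret(object):
    def __init__(self, snippet, detail):
        self.__parent__ = snippet  # the ISnippet the secret was found in
        self.__name__ = detail     # alias for __str__, per the contract
    def __str__(self):
        return self.__name__
    def __hash__(self):
        return hash((id(self.__parent__), self.__name__))

secret = StaticSecret(snippet=object(), detail="token found at line 3")
print(str(secret))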
packed-struct-vec.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#9116) Bus error
use std::mem;
#[repr(packed)]
#[derive(Copy, PartialEq, Debug)]
struct Foo {
bar: u8,
baz: u64
}
pub fn main() {
let foos = [Foo { bar: 1, baz: 2 }; 10];
assert_eq!(mem::size_of::<[Foo; 10]>(), 90);
for i in 0u..10 {
assert_eq!(foos[i], Foo { bar: 1, baz: 2});
}
for &foo in &foos {
assert_eq!(foo, Foo { bar: 1, baz: 2 });
}
}
|
random_line_split
|
|
packed-struct-vec.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#9116) Bus error
use std::mem;
#[repr(packed)]
#[derive(Copy, PartialEq, Debug)]
struct Foo {
bar: u8,
baz: u64
}
pub fn
|
() {
let foos = [Foo { bar: 1, baz: 2 }; 10];
assert_eq!(mem::size_of::<[Foo; 10]>(), 90);
for i in 0u..10 {
assert_eq!(foos[i], Foo { bar: 1, baz: 2});
}
for &foo in &foos {
assert_eq!(foo, Foo { bar: 1, baz: 2 });
}
}
|
main
|
identifier_name
|
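The packed-struct-vec.rs assertion above follows from #[repr(packed)] removing all padding: one u8 plus one u64 is 9 bytes per element, so ten elements occupy 90. Python's struct module reproduces the arithmetic ('=' packs without alignment padding, '@' keeps native alignment):

import struct

packed = struct.calcsize('=BQ')   # u8 + u64 with no padding -> 9
aligned = struct.calcsize('@BQ')  # native alignment -> typically 16 on 64-bit
print(packed, packed * 10)        # 9 90, matching size_of::<[Foo; 10]>() == 90
print(aligned)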
table_view_layout_3.js
|
function tv_layout3()
|
;
module.exports = tv_layout3;
|
{
var win = Titanium.UI.createWindow();
win.backgroundImage='/images/chip.jpg';
var data =[];
var section = Ti.UI.createTableViewSection();
data.push(section);
// ROW 1
var row1 = Ti.UI.createTableViewRow();
row1.backgroundColor = '#670000';
row1.selectedBackgroundColor = '#670000';
row1.height = 60;
row1.addEventListener('click', function(e) {
Ti.API.log(e.source+" click at ("+e.x+","+e.y+")");
});
var item1 = Ti.UI.createLabel({
color:'#fff',
text:'Burger',
font:{fontSize:20, fontWeight:'bold'},
top:3,
left:10,
height:30,
width:100
});
row1.add(item1);
var cost1 = Ti.UI.createLabel({
color:'#fff',
text:'$2.50',
font:{fontSize:16},
top:26,
left:10,
height:25,
width:150
});
row1.add(cost1);
var add1 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/addDefault.png',
height:27,
width:27,
top:15,
right:10
});
add1.addEventListener('click', function()
{
Ti.API.log("Adding...");
row1.backgroundColor = '#390A0E';
setTimeout(function()
{
delete1.show();
},100);
add1.hide();
cost1.animate({left:50, duration:100});
item1.animate({left:50, duration:100});
});
row1.add(add1);
var delete1 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/minusDefault.png',
height:27,
width:27,
top:15,
left:10,
visible:false
});
delete1.addEventListener('click', function()
{
Ti.API.log("Deleting...");
row1.backgroundColor = '#670000';
delete1.hide();
add1.show();
cost1.animate({left:10, duration:100});
item1.animate({left:10, duration:100});
});
row1.add(delete1);
section.add(row1);
// ROW 2
var row2 = Ti.UI.createTableViewRow();
row2.backgroundColor = '#670000';
row2.selectedBackgroundColor = '#670000';
row2.height = 60;
row2.addEventListener('click', function(e) {
Ti.API.log(e.source+" click at ("+e.x+","+e.y+")");
});
var item2 = Ti.UI.createLabel({
color:'#fff',
text:'Cheese Burger',
font:{fontSize:20, fontWeight:'bold'},
top:3,
left:10,
height:30,
width:100
});
row2.add(item2);
var cost2 = Ti.UI.createLabel({
color:'#fff',
text:'$3.25',
font:{fontSize:16},
top:26,
left:10,
height:25,
width:150
});
row2.add(cost2);
var add2 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/addDefault.png',
height:27,
width:27,
top:15,
right:10
});
add2.addEventListener('click', function()
{
row2.backgroundColor = '#390A0E';
setTimeout(function()
{
delete2.show();
},100);
add2.hide();
cost2.animate({left:50, duration:100});
item2.animate({left:50, duration:100});
});
row2.add(add2);
var delete2 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/minusDefault.png',
height:27,
width:27,
top:15,
left:10,
visible:false
});
delete2.addEventListener('click', function()
{
row2.backgroundColor = '#670000';
delete2.hide();
add2.show();
cost2.animate({left:10, duration:100});
item2.animate({left:10, duration:100});
});
row2.add(delete2);
section.add(row2);
var tableview = Titanium.UI.createTableView({
data:data,
style:Titanium.UI.iPhone.TableViewStyle.GROUPED,
backgroundColor:'transparent',
separatorColor:'#390A0E'
});
win.add(tableview);
return win;
}
|
identifier_body
|
table_view_layout_3.js
|
function
|
() {
var win = Titanium.UI.createWindow();
win.backgroundImage='/images/chip.jpg';
var data =[];
var section = Ti.UI.createTableViewSection();
data.push(section);
// ROW 1
var row1 = Ti.UI.createTableViewRow();
row1.backgroundColor = '#670000';
row1.selectedBackgroundColor = '#670000';
row1.height = 60;
row1.addEventListener('click', function(e) {
Ti.API.log(e.source+" click at ("+e.x+","+e.y+")");
});
var item1 = Ti.UI.createLabel({
color:'#fff',
text:'Burger',
font:{fontSize:20, fontWeight:'bold'},
top:3,
left:10,
height:30,
width:100
});
row1.add(item1);
var cost1 = Ti.UI.createLabel({
color:'#fff',
text:'$2.50',
font:{fontSize:16},
top:26,
left:10,
height:25,
width:150
});
row1.add(cost1);
var add1 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/addDefault.png',
height:27,
width:27,
top:15,
right:10
});
add1.addEventListener('click', function()
{
Ti.API.log("Adding...");
row1.backgroundColor = '#390A0E';
setTimeout(function()
{
delete1.show();
},100);
add1.hide();
cost1.animate({left:50, duration:100});
item1.animate({left:50, duration:100});
});
row1.add(add1);
var delete1 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/minusDefault.png',
height:27,
width:27,
top:15,
left:10,
visible:false
});
delete1.addEventListener('click', function()
{
Ti.API.log("Deleting...");
row1.backgroundColor = '#670000';
delete1.hide();
add1.show();
cost1.animate({left:10, duration:100});
item1.animate({left:10, duration:100});
});
row1.add(delete1);
section.add(row1);
// ROW 2
var row2 = Ti.UI.createTableViewRow();
row2.backgroundColor = '#670000';
row2.selectedBackgroundColor = '#670000';
row2.height = 60;
row2.addEventListener('click', function(e) {
Ti.API.log(e.source+" click at ("+e.x+","+e.y+")");
});
var item2 = Ti.UI.createLabel({
color:'#fff',
text:'Cheese Burger',
font:{fontSize:20, fontWeight:'bold'},
top:3,
left:10,
height:30,
width:100
});
row2.add(item2);
var cost2 = Ti.UI.createLabel({
color:'#fff',
text:'$3.25',
font:{fontSize:16},
top:26,
left:10,
height:25,
width:150
});
row2.add(cost2);
var add2 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/addDefault.png',
height:27,
width:27,
top:15,
right:10
});
add2.addEventListener('click', function()
{
row2.backgroundColor = '#390A0E';
setTimeout(function()
{
delete2.show();
},100);
add2.hide();
cost2.animate({left:50, duration:100});
item2.animate({left:50, duration:100});
});
row2.add(add2);
var delete2 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/minusDefault.png',
height:27,
width:27,
top:15,
left:10,
visible:false
});
delete2.addEventListener('click', function()
{
row2.backgroundColor = '#670000';
delete2.hide();
add2.show();
cost2.animate({left:10, duration:100});
item2.animate({left:10, duration:100});
});
row2.add(delete2);
section.add(row2);
var tableview = Titanium.UI.createTableView({
data:data,
style:Titanium.UI.iPhone.TableViewStyle.GROUPED,
backgroundColor:'transparent',
separatorColor:'#390A0E'
});
win.add(tableview);
return win;
};
module.exports = tv_layout3;
|
tv_layout3
|
identifier_name
|
table_view_layout_3.js
|
function tv_layout3() {
var win = Titanium.UI.createWindow();
win.backgroundImage='/images/chip.jpg';
var data =[];
var section = Ti.UI.createTableViewSection();
data.push(section);
// ROW 1
var row1 = Ti.UI.createTableViewRow();
row1.backgroundColor = '#670000';
row1.selectedBackgroundColor = '#670000';
row1.height = 60;
row1.addEventListener('click', function(e) {
Ti.API.log(e.source+" click at ("+e.x+","+e.y+")");
});
var item1 = Ti.UI.createLabel({
color:'#fff',
text:'Burger',
font:{fontSize:20, fontWeight:'bold'},
top:3,
left:10,
height:30,
width:100
});
row1.add(item1);
var cost1 = Ti.UI.createLabel({
color:'#fff',
text:'$2.50',
font:{fontSize:16},
top:26,
left:10,
height:25,
width:150
});
row1.add(cost1);
var add1 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/addDefault.png',
height:27,
width:27,
top:15,
right:10
});
add1.addEventListener('click', function()
{
Ti.API.log("Adding...");
row1.backgroundColor = '#390A0E';
setTimeout(function()
{
delete1.show();
},100);
add1.hide();
cost1.animate({left:50, duration:100});
item1.animate({left:50, duration:100});
});
row1.add(add1);
var delete1 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/minusDefault.png',
height:27,
width:27,
top:15,
left:10,
visible:false
});
delete1.addEventListener('click', function()
{
Ti.API.log("Deleting...");
row1.backgroundColor = '#670000';
delete1.hide();
|
});
row1.add(delete1);
section.add(row1);
// ROW 2
var row2 = Ti.UI.createTableViewRow();
row2.backgroundColor = '#670000';
row2.selectedBackgroundColor = '#670000';
row2.height = 60;
row2.addEventListener('click', function(e) {
Ti.API.log(e.source+" click at ("+e.x+","+e.y+")");
});
var item2 = Ti.UI.createLabel({
color:'#fff',
text:'Cheese Burger',
font:{fontSize:20, fontWeight:'bold'},
top:3,
left:10,
height:30,
width:100
});
row2.add(item2);
var cost2 = Ti.UI.createLabel({
color:'#fff',
text:'$3.25',
font:{fontSize:16},
top:26,
left:10,
height:25,
width:150
});
row2.add(cost2);
var add2 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/addDefault.png',
height:27,
width:27,
top:15,
right:10
});
add2.addEventListener('click', function()
{
row2.backgroundColor = '#390A0E';
setTimeout(function()
{
delete2.show();
},100);
add2.hide();
cost2.animate({left:50, duration:100});
item2.animate({left:50, duration:100});
});
row2.add(add2);
var delete2 = Ti.UI.createButton({
backgroundImage:'/images/groupedview/minusDefault.png',
height:27,
width:27,
top:15,
left:10,
visible:false
});
delete2.addEventListener('click', function()
{
row2.backgroundColor = '#670000';
delete2.hide();
add2.show();
cost2.animate({left:10, duration:100});
item2.animate({left:10, duration:100});
});
row2.add(delete2);
section.add(row2);
var tableview = Titanium.UI.createTableView({
data:data,
style:Titanium.UI.iPhone.TableViewStyle.GROUPED,
backgroundColor:'transparent',
separatorColor:'#390A0E'
});
win.add(tableview);
return win;
};
module.exports = tv_layout3;
|
add1.show();
cost1.animate({left:10, duration:100});
item1.animate({left:10, duration:100});
|
random_line_split
|
mod.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
use core::unicode::property::Pattern_White_Space;
use rustc::ty;
use syntax_pos::Span;
pub mod borrowck_errors;
pub mod elaborate_drops;
pub mod def_use;
pub mod patch;
mod alignment;
mod graphviz;
pub(crate) mod pretty;
pub mod liveness;
pub mod collect_writes;
pub use self::alignment::is_disaligned;
pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
pub use self::graphviz::{write_mir_graphviz};
pub use self::graphviz::write_node_label as write_graphviz_node_label;
/// If possible, suggest replacing `ref` with `ref mut`.
pub fn suggest_ref_mut<'cx, 'gcx, 'tcx>(
tcx: ty::TyCtxt<'cx, 'gcx, 'tcx>,
binding_span: Span,
) -> Option<String> {
let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).unwrap();
if hi_src.starts_with("ref")
&& hi_src["ref".len()..].starts_with(Pattern_White_Space)
{
let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
Some(replacement)
} else {
None
}
}
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
mod.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::unicode::property::Pattern_White_Space;
use rustc::ty;
use syntax_pos::Span;
pub mod borrowck_errors;
pub mod elaborate_drops;
pub mod def_use;
pub mod patch;
mod alignment;
mod graphviz;
pub(crate) mod pretty;
pub mod liveness;
pub mod collect_writes;
pub use self::alignment::is_disaligned;
pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
pub use self::graphviz::{write_mir_graphviz};
pub use self::graphviz::write_node_label as write_graphviz_node_label;
/// If possible, suggest replacing `ref` with `ref mut`.
pub fn
|
<'cx, 'gcx, 'tcx>(
tcx: ty::TyCtxt<'cx, 'gcx, 'tcx>,
binding_span: Span,
) -> Option<String> {
let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).unwrap();
if hi_src.starts_with("ref")
&& hi_src["ref".len()..].starts_with(Pattern_White_Space)
{
let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
Some(replacement)
} else {
None
}
}
|
suggest_ref_mut
|
identifier_name
|
mod.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::unicode::property::Pattern_White_Space;
use rustc::ty;
use syntax_pos::Span;
pub mod borrowck_errors;
pub mod elaborate_drops;
pub mod def_use;
pub mod patch;
mod alignment;
mod graphviz;
pub(crate) mod pretty;
pub mod liveness;
pub mod collect_writes;
pub use self::alignment::is_disaligned;
pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
pub use self::graphviz::{write_mir_graphviz};
pub use self::graphviz::write_node_label as write_graphviz_node_label;
/// If possible, suggest replacing `ref` with `ref mut`.
pub fn suggest_ref_mut<'cx, 'gcx, 'tcx>(
tcx: ty::TyCtxt<'cx, 'gcx, 'tcx>,
binding_span: Span,
) -> Option<String> {
let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).unwrap();
if hi_src.starts_with("ref")
&& hi_src["ref".len()..].starts_with(Pattern_White_Space)
{
let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
Some(replacement)
} else
|
}
|
{
None
}
|
conditional_block
|
mod.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::unicode::property::Pattern_White_Space;
use rustc::ty;
use syntax_pos::Span;
pub mod borrowck_errors;
pub mod elaborate_drops;
pub mod def_use;
pub mod patch;
mod alignment;
mod graphviz;
pub(crate) mod pretty;
pub mod liveness;
pub mod collect_writes;
pub use self::alignment::is_disaligned;
pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
pub use self::graphviz::{write_mir_graphviz};
pub use self::graphviz::write_node_label as write_graphviz_node_label;
/// If possible, suggest replacing `ref` with `ref mut`.
pub fn suggest_ref_mut<'cx, 'gcx, 'tcx>(
tcx: ty::TyCtxt<'cx, 'gcx, 'tcx>,
binding_span: Span,
) -> Option<String>
|
{
let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).unwrap();
if hi_src.starts_with("ref")
&& hi_src["ref".len()..].starts_with(Pattern_White_Space)
{
let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
Some(replacement)
} else {
None
}
}
|
identifier_body
|
|
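suggest_ref_mut above is plain string surgery: when the source snippet starts with `ref` followed by pattern whitespace, it splices in `mut`. A rough Python equivalent, with str.isspace approximating the Pattern_White_Space check:

def suggest_ref_mut(snippet):
    # 'ref' followed by whitespace -> suggest 'ref mut' plus the remainder
    rest = snippet[len("ref"):]
    if snippet.startswith("ref") and rest[:1].isspace():
        return "ref mut" + rest
    return None

print(suggest_ref_mut("ref x"))      # ref mut x
print(suggest_ref_mut("reference"))  # None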
aggregation.py
|
'''
TODO:
optimize adds, multiplies, 'or' and 'and' as they can accept more than two values
validate type info on specific functions
'''
from .matching import AstHandler, ParseError, DateTimeFunc
class AggregationParser(AstHandler):
FUNC_TO_ARGS = {'concat': '+', # more than 1
'strcasecmp': 2,
'substr': 3,
'toLower': 1,
'toUpper': 1,
'dayOfYear': 1,
'dayOfMonth': 1,
'dayOfWeek': 1,
'year': 1,
'month': 1,
'week': 1,
'hour': 1,
'minute': 1,
'second': 1,
'millisecond': 1,
'date': 1,
'cmp': 2,
'ifnull': 2}
SPECIAL_VALUES = {'False': False,
'false': False,
'True': True,
'true': True,
'None': None,
'null': None}
def handle_Str(self, node):
return node.s
def handle_Num(self, node):
return node.n
def handle_Name(self, node):
return self.SPECIAL_VALUES.get(node.id, '$' + node.id)
def handle_NameConstant(self,node):
return self.SPECIAL_VALUES.get(str(node.value),node.value)
|
return '${0}.{1}'.format(self.handle(node.value), node.attr).replace('$$', '$')
def handle_UnaryOp(self, op):
return {self.handle(op.op): self.handle(op.operand)}
def handle_IfExp(self, op):
return {'$cond': [self.handle(op.test),
self.handle(op.body),
self.handle(op.orelse)]}
def handle_Call(self, node):
name = node.func.id
if name == 'date':
return DateTimeFunc().handle_date(node)
if name not in self.FUNC_TO_ARGS:
raise ParseError('Unsupported function ({0}).'.format(name),
col_offset=node.col_offset)
if len(node.args) != self.FUNC_TO_ARGS[name] and \
self.FUNC_TO_ARGS[name] != '+' or len(node.args) == 0:
raise ParseError('Invalid number of arguments to function {0}'.format(name),
col_offset=node.col_offset)
# because of SERVER-9289 the following fails: {'$year': {'$add' :['$time_stamp', 1]}}
# wrapping both single arg functions in a list solves it: {'$year': [{'$add' :['$time_stamp', 1]}]}
return {'$' + node.func.id: list(map(self.handle, node.args))}
def handle_BinOp(self, node):
return {self.handle(node.op): [self.handle(node.left),
self.handle(node.right)]}
def handle_Not(self, not_node):
return '$not'
def handle_And(self, op):
return '$and'
def handle_Or(self, op):
return '$or'
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_Compare(self, node):
if len(node.ops) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(node.ops)),
col_offset=node.comparators[1].col_offset)
return {self.handle(node.ops[0]): [self.handle(node.left),
self.handle(node.comparators[0])]}
def handle_Gt(self, node):
return '$gt'
def handle_Lt(self,node):
return '$lt'
def handle_GtE(self, node):
return '$gte'
def handle_LtE(self, node):
return '$lte'
def handle_Eq(self, node):
return '$eq'
def handle_NotEq(self, node):
return '$ne'
def handle_Add(self, node):
return '$add'
def handle_Sub(self, node):
return '$subtract'
def handle_Mod(self, node):
return '$mod'
def handle_Mult(self, node):
return '$multiply'
def handle_Div(self, node):
return '$divide'
class AggregationGroupParser(AstHandler):
GROUP_FUNCTIONS = ['addToSet', 'push', 'first', 'last',
'max', 'min', 'avg', 'sum']
def handle_Call(self, node):
if len(node.args) != 1:
raise ParseError('The {0} group aggregation function accepts one argument'.format(node.func.id),
col_offset=node.col_offset)
if node.func.id not in self.GROUP_FUNCTIONS:
raise ParseError('Unsupported group function: {0}'.format(node.func.id),
col_offset=node.col_offset,
options=self.GROUP_FUNCTIONS)
return {'$' + node.func.id: AggregationParser().handle(node.args[0])}
|
def handle_Attribute(self, node):
|
random_line_split
|
aggregation.py
|
'''
TODO:
optimize adds, multiplies, 'or' and 'and' as they can accept more than two values
validate type info on specific functions
'''
from .matching import AstHandler, ParseError, DateTimeFunc
class AggregationParser(AstHandler):
FUNC_TO_ARGS = {'concat': '+', # more than 1
'strcasecmp': 2,
'substr': 3,
'toLower': 1,
'toUpper': 1,
'dayOfYear': 1,
'dayOfMonth': 1,
'dayOfWeek': 1,
'year': 1,
'month': 1,
'week': 1,
'hour': 1,
'minute': 1,
'second': 1,
'millisecond': 1,
'date': 1,
'cmp': 2,
'ifnull': 2}
SPECIAL_VALUES = {'False': False,
'false': False,
'True': True,
'true': True,
'None': None,
'null': None}
def handle_Str(self, node):
return node.s
def handle_Num(self, node):
return node.n
def handle_Name(self, node):
return self.SPECIAL_VALUES.get(node.id, '$' + node.id)
def handle_NameConstant(self,node):
return self.SPECIAL_VALUES.get(str(node.value),node.value)
def handle_Attribute(self, node):
return '${0}.{1}'.format(self.handle(node.value), node.attr).replace('$$', '$')
def handle_UnaryOp(self, op):
return {self.handle(op.op): self.handle(op.operand)}
def handle_IfExp(self, op):
return {'$cond': [self.handle(op.test),
self.handle(op.body),
self.handle(op.orelse)]}
def handle_Call(self, node):
name = node.func.id
if name == 'date':
return DateTimeFunc().handle_date(node)
if name not in self.FUNC_TO_ARGS:
raise ParseError('Unsupported function ({0}).'.format(name),
col_offset=node.col_offset)
if len(node.args) != self.FUNC_TO_ARGS[name] and \
self.FUNC_TO_ARGS[name] != '+' or len(node.args) == 0:
raise ParseError('Invalid number of arguments to function {0}'.format(name),
col_offset=node.col_offset)
# because of SERVER-9289 the following fails: {'$year': {'$add' :['$time_stamp', 1]}}
# wrapping both single arg functions in a list solves it: {'$year': [{'$add' :['$time_stamp', 1]}]}
return {'$' + node.func.id: list(map(self.handle, node.args))}
def handle_BinOp(self, node):
return {self.handle(node.op): [self.handle(node.left),
self.handle(node.right)]}
def handle_Not(self, not_node):
return '$not'
def handle_And(self, op):
return '$and'
def handle_Or(self, op):
return '$or'
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_Compare(self, node):
if len(node.ops) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(node.ops)),
col_offset=node.comparators[1].col_offset)
return {self.handle(node.ops[0]): [self.handle(node.left),
self.handle(node.comparators[0])]}
def handle_Gt(self, node):
return '$gt'
def handle_Lt(self,node):
return '$lt'
def handle_GtE(self, node):
return '$gte'
def handle_LtE(self, node):
return '$lte'
def handle_Eq(self, node):
return '$eq'
def handle_NotEq(self, node):
return '$ne'
def handle_Add(self, node):
return '$add'
def handle_Sub(self, node):
return '$subtract'
def handle_Mod(self, node):
return '$mod'
def handle_Mult(self, node):
return '$multiply'
def handle_Div(self, node):
return '$divide'
class AggregationGroupParser(AstHandler):
GROUP_FUNCTIONS = ['addToSet', 'push', 'first', 'last',
'max', 'min', 'avg', 'sum']
def handle_Call(self, node):
if len(node.args) != 1:
|
if node.func.id not in self.GROUP_FUNCTIONS:
raise ParseError('Unsupported group function: {0}'.format(node.func.id),
col_offset=node.col_offset,
options=self.GROUP_FUNCTIONS)
return {'$' + node.func.id: AggregationParser().handle(node.args[0])}
|
raise ParseError('The {0} group aggregation function accepts one argument'.format(node.func.id),
col_offset=node.col_offset)
|
conditional_block
|
aggregation.py
|
'''
TODO:
optimize adds, multiplies, 'or' and 'and' as they can accept more than two values
validate type info on specific functions
'''
from .matching import AstHandler, ParseError, DateTimeFunc
class AggregationParser(AstHandler):
FUNC_TO_ARGS = {'concat': '+', # more than 1
'strcasecmp': 2,
'substr': 3,
'toLower': 1,
'toUpper': 1,
'dayOfYear': 1,
'dayOfMonth': 1,
'dayOfWeek': 1,
'year': 1,
'month': 1,
'week': 1,
'hour': 1,
'minute': 1,
'second': 1,
'millisecond': 1,
'date': 1,
'cmp': 2,
'ifnull': 2}
SPECIAL_VALUES = {'False': False,
'false': False,
'True': True,
'true': True,
'None': None,
'null': None}
def handle_Str(self, node):
return node.s
def handle_Num(self, node):
return node.n
def handle_Name(self, node):
return self.SPECIAL_VALUES.get(node.id, '$' + node.id)
def handle_NameConstant(self,node):
return self.SPECIAL_VALUES.get(str(node.value),node.value)
def handle_Attribute(self, node):
return '${0}.{1}'.format(self.handle(node.value), node.attr).replace('$$', '$')
def handle_UnaryOp(self, op):
return {self.handle(op.op): self.handle(op.operand)}
def handle_IfExp(self, op):
return {'$cond': [self.handle(op.test),
self.handle(op.body),
self.handle(op.orelse)]}
def handle_Call(self, node):
name = node.func.id
if name == 'date':
return DateTimeFunc().handle_date(node)
if name not in self.FUNC_TO_ARGS:
raise ParseError('Unsupported function ({0}).'.format(name),
col_offset=node.col_offset)
if len(node.args) != self.FUNC_TO_ARGS[name] and \
self.FUNC_TO_ARGS[name] != '+' or len(node.args) == 0:
raise ParseError('Invalid number of arguments to function {0}'.format(name),
col_offset=node.col_offset)
# because of SERVER-9289 the following fails: {'$year': {'$add' :['$time_stamp', 1]}}
# wrapping both single arg functions in a list solves it: {'$year': [{'$add' :['$time_stamp', 1]}]}
return {'$' + node.func.id: list(map(self.handle, node.args))}
def handle_BinOp(self, node):
return {self.handle(node.op): [self.handle(node.left),
self.handle(node.right)]}
def handle_Not(self, not_node):
return '$not'
def handle_And(self, op):
return '$and'
def handle_Or(self, op):
return '$or'
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_Compare(self, node):
if len(node.ops) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(node.ops)),
col_offset=node.comparators[1].col_offset)
return {self.handle(node.ops[0]): [self.handle(node.left),
self.handle(node.comparators[0])]}
def handle_Gt(self, node):
return '$gt'
def handle_Lt(self,node):
return '$lt'
def handle_GtE(self, node):
return '$gte'
def handle_LtE(self, node):
return '$lte'
def
|
(self, node):
return '$eq'
def handle_NotEq(self, node):
return '$ne'
def handle_Add(self, node):
return '$add'
def handle_Sub(self, node):
return '$subtract'
def handle_Mod(self, node):
return '$mod'
def handle_Mult(self, node):
return '$multiply'
def handle_Div(self, node):
return '$divide'
class AggregationGroupParser(AstHandler):
GROUP_FUNCTIONS = ['addToSet', 'push', 'first', 'last',
'max', 'min', 'avg', 'sum']
def handle_Call(self, node):
if len(node.args) != 1:
raise ParseError('The {0} group aggregation function accepts one argument'.format(node.func.id),
col_offset=node.col_offset)
if node.func.id not in self.GROUP_FUNCTIONS:
raise ParseError('Unsupported group function: {0}'.format(node.func.id),
col_offset=node.col_offset,
options=self.GROUP_FUNCTIONS)
return {'$' + node.func.id: AggregationParser().handle(node.args[0])}
|
handle_Eq
|
identifier_name
|
aggregation.py
|
'''
TODO:
optimize adds, multiplies, 'or' and 'and' as they can accept more than two values
validate type info on specific functions
'''
from .matching import AstHandler, ParseError, DateTimeFunc
class AggregationParser(AstHandler):
FUNC_TO_ARGS = {'concat': '+', # more than 1
'strcasecmp': 2,
'substr': 3,
'toLower': 1,
'toUpper': 1,
'dayOfYear': 1,
'dayOfMonth': 1,
'dayOfWeek': 1,
'year': 1,
'month': 1,
'week': 1,
'hour': 1,
'minute': 1,
'second': 1,
'millisecond': 1,
'date': 1,
'cmp': 2,
'ifnull': 2}
SPECIAL_VALUES = {'False': False,
'false': False,
'True': True,
'true': True,
'None': None,
'null': None}
def handle_Str(self, node):
return node.s
def handle_Num(self, node):
return node.n
def handle_Name(self, node):
return self.SPECIAL_VALUES.get(node.id, '$' + node.id)
def handle_NameConstant(self,node):
return self.SPECIAL_VALUES.get(str(node.value),node.value)
def handle_Attribute(self, node):
return '${0}.{1}'.format(self.handle(node.value), node.attr).replace('$$', '$')
def handle_UnaryOp(self, op):
return {self.handle(op.op): self.handle(op.operand)}
def handle_IfExp(self, op):
return {'$cond': [self.handle(op.test),
self.handle(op.body),
self.handle(op.orelse)]}
def handle_Call(self, node):
name = node.func.id
if name == 'date':
return DateTimeFunc().handle_date(node)
if name not in self.FUNC_TO_ARGS:
raise ParseError('Unsupported function ({0}).'.format(name),
col_offset=node.col_offset)
if len(node.args) != self.FUNC_TO_ARGS[name] and \
self.FUNC_TO_ARGS[name] != '+' or len(node.args) == 0:
raise ParseError('Invalid number of arguments to function {0}'.format(name),
col_offset=node.col_offset)
# because of SERVER-9289 the following fails: {'$year': {'$add' :['$time_stamp', 1]}}
# wrapping both single arg functions in a list solves it: {'$year': [{'$add' :['$time_stamp', 1]}]}
return {'$' + node.func.id: list(map(self.handle, node.args))}
def handle_BinOp(self, node):
return {self.handle(node.op): [self.handle(node.left),
self.handle(node.right)]}
def handle_Not(self, not_node):
return '$not'
def handle_And(self, op):
return '$and'
def handle_Or(self, op):
return '$or'
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_Compare(self, node):
if len(node.ops) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(node.ops)),
col_offset=node.comparators[1].col_offset)
return {self.handle(node.ops[0]): [self.handle(node.left),
self.handle(node.comparators[0])]}
def handle_Gt(self, node):
return '$gt'
def handle_Lt(self,node):
return '$lt'
def handle_GtE(self, node):
return '$gte'
def handle_LtE(self, node):
return '$lte'
def handle_Eq(self, node):
return '$eq'
def handle_NotEq(self, node):
return '$ne'
def handle_Add(self, node):
return '$add'
def handle_Sub(self, node):
|
def handle_Mod(self, node):
return '$mod'
def handle_Mult(self, node):
return '$multiply'
def handle_Div(self, node):
return '$divide'
class AggregationGroupParser(AstHandler):
GROUP_FUNCTIONS = ['addToSet', 'push', 'first', 'last',
'max', 'min', 'avg', 'sum']
def handle_Call(self, node):
if len(node.args) != 1:
raise ParseError('The {0} group aggregation function accepts one argument'.format(node.func.id),
col_offset=node.col_offset)
if node.func.id not in self.GROUP_FUNCTIONS:
raise ParseError('Unsupported group function: {0}'.format(node.func.id),
col_offset=node.col_offset,
options=self.GROUP_FUNCTIONS)
return {'$' + node.func.id: AggregationParser().handle(node.args[0])}
|
return '$subtract'
|
identifier_body
|
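The aggregation.py records above all rely on the same dispatch pattern: handle() routes each AST node to a handle_<NodeName> method that rebuilds the expression as MongoDB aggregation operators. A self-contained miniature of that idea for Python 3.8+, where literals parse as ast.Constant rather than the handle_Num/handle_Str pair used above (MiniParser is a toy, not the real class):

import ast

class MiniParser(object):
    # Route each node to a handle_<NodeName> method, as AstHandler does.
    def handle(self, node):
        return getattr(self, 'handle_' + node.__class__.__name__)(node)
    def handle_Expression(self, node):
        return self.handle(node.body)
    def handle_Name(self, node):
        return '$' + node.id
    def handle_Constant(self, node):
        return node.value
    def handle_BinOp(self, node):
        op = {ast.Add: '$add', ast.Sub: '$subtract'}[type(node.op)]
        return {op: [self.handle(node.left), self.handle(node.right)]}

print(MiniParser().handle(ast.parse('price - discount + 1', mode='eval')))
# {'$add': [{'$subtract': ['$price', '$discount']}, 1]}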
main.py
|
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from urllib2 import URLError
import json
import traceback
log = CPLog(__name__)
class Sabnzbd(Downloader):
type = ['nzb']
def download(self, data = {}, movie = {}, filedata = None):
log.info('Sending "%s" to SABnzbd.', data.get('name'))
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
}
if filedata:
if len(filedata) < 50:
log.error('No proper nzb available: %s', (filedata))
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, movie)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
            if req_params.get('mode') == 'addfile':
|
else:
sab_data = self.call(req_params)
except URLError:
log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
return False
except:
log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
return False
log.debug('Result from SAB: %s', sab_data)
if sab_data.get('status') and not sab_data.get('error'):
log.info('NZB sent to SAB successfully.')
if filedata:
return self.downloadReturnId(sab_data.get('nzo_ids')[0])
else:
return True
else:
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def getAllDownloadStatus(self):
log.debug('Checking SABnzbd download status.')
# Go through Queue
try:
queue = self.call({
'mode': 'queue',
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return False
# Go through history items
try:
history = self.call({
'mode': 'history',
'limit': 15,
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
statuses = StatusList(self)
# Get busy releases
for item in queue.get('slots', []):
statuses.append({
'id': item['nzo_id'],
'name': item['filename'],
'original_status': item['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for item in history.get('slots', []):
status = 'busy'
if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
status = 'failed'
elif item['status'] == 'Completed':
status = 'completed'
statuses.append({
'id': item['nzo_id'],
'name': item['name'],
'status': status,
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['storage'],
})
return statuses
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': item['id']
}, use_json = False)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
return True
def call(self, request_params, use_json = True, **kwargs):
url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
'apikey': self.conf('api_key'),
'output': 'json'
}))
data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
if use_json:
d = json.loads(data)
if d.get('error'):
log.error('Error getting data from SABNZBd: %s', d.get('error'))
return {}
return d.get(request_params['mode']) or d
else:
return data
|
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
|
conditional_block
|
main.py
|
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from urllib2 import URLError
import json
import traceback
log = CPLog(__name__)
class Sabnzbd(Downloader):
type = ['nzb']
def download(self, data = {}, movie = {}, filedata = None):
log.info('Sending "%s" to SABnzbd.', data.get('name'))
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
}
if filedata:
if len(filedata) < 50:
log.error('No proper nzb available: %s', (filedata))
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, movie)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
            if req_params.get('mode') == 'addfile':
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
else:
sab_data = self.call(req_params)
except URLError:
log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
return False
except:
log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
return False
log.debug('Result from SAB: %s', sab_data)
if sab_data.get('status') and not sab_data.get('error'):
log.info('NZB sent to SAB successfully.')
if filedata:
return self.downloadReturnId(sab_data.get('nzo_ids')[0])
else:
return True
else:
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def getAllDownloadStatus(self):
log.debug('Checking SABnzbd download status.')
# Go through Queue
try:
queue = self.call({
'mode': 'queue',
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return False
# Go through history items
try:
history = self.call({
'mode': 'history',
'limit': 15,
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
statuses = StatusList(self)
# Get busy releases
for item in queue.get('slots', []):
statuses.append({
'id': item['nzo_id'],
'name': item['filename'],
'original_status': item['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for item in history.get('slots', []):
status = 'busy'
if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
status = 'failed'
elif item['status'] == 'Completed':
status = 'completed'
statuses.append({
'id': item['nzo_id'],
'name': item['name'],
'status': status,
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['storage'],
})
return statuses
def
|
(self, item):
log.info('%s failed downloading, deleting...', item['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': item['id']
}, use_json = False)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
return True
def call(self, request_params, use_json = True, **kwargs):
url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
'apikey': self.conf('api_key'),
'output': 'json'
}))
data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
if use_json:
d = json.loads(data)
if d.get('error'):
log.error('Error getting data from SABNZBd: %s', d.get('error'))
return {}
return d.get(request_params['mode']) or d
else:
return data
|
removeFailed
|
identifier_name
|
main.py
|
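
The history-slot classification in getAllDownloadStatus is self-contained enough to exercise on its own. A minimal sketch under stated assumptions: only the field names and the branching logic come from the code above; the slot values and the file name are invented for illustration.

# status_sketch.py -- standalone illustration, not part of main.py
def map_history_status(item):
    # Mirrors the branch in Sabnzbd.getAllDownloadStatus: a 'Completed' slot
    # with a non-empty fail_message is still treated as failed.
    status = 'busy'
    if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
        status = 'failed'
    elif item['status'] == 'Completed':
        status = 'completed'
    return status

if __name__ == '__main__':
    slots = [
        {'status': 'Completed', 'fail_message': ''},            # -> completed
        {'status': 'Completed', 'fail_message': 'Unpack crc'},  # -> failed
        {'status': 'Failed', 'fail_message': ''},               # -> failed
        {'status': 'Extracting', 'fail_message': ''},           # -> busy
    ]
    for slot in slots:
        print('%s -> %s' % (slot['status'], map_history_status(slot)))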
compat.py
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Compatibility module to provide backwards compatibility for useful Python
features.

This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.

@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
    on Python 3.

@var NativeStringIO: An in-memory file-like object that operates on the native
    string type (bytes in Python 2, unicode in Python 3).

@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
    Python 3)
"""

from __future__ import absolute_import, division

import inspect
import os
import socket
import string
import struct
import sys

from types import MethodType as _MethodType
from io import TextIOBase, IOBase


if sys.version_info < (3, 0):
    _PY3 = False
else:
    _PY3 = True


def currentframe(n=0):
    """
    In Python 3, L{inspect.currentframe} does not take a stack-level argument.
    Restore that functionality from Python 2 so we don't have to re-implement
    the C{f_back}-walking loop in places where it's called.

    @param n: The number of stack levels above the caller to walk.
    @type n: L{int}

    @return: a frame, n levels up the stack from the caller.
    @rtype: L{types.FrameType}
    """
    f = inspect.currentframe()
    for x in range(n + 1):
        f = f.f_back
    return f


def inet_pton(af, addr):
    if af == socket.AF_INET:
        return socket.inet_aton(addr)
    elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        illegalChars = [x for x in addr if x not in string.hexdigits + ':.']
        if illegalChars:
            raise ValueError("Illegal characters: %r" % (''.join(illegalChars),))

        parts = addr.split(':')
        elided = parts.count('')
        ipv4Component = '.' in parts[-1]

        if len(parts) > (8 - ipv4Component) or elided > 3:
            raise ValueError("Syntactically invalid address")

        if elided == 3:
            return '\x00' * 16

        if elided:
            zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)

            if addr.startswith('::'):
                parts[:2] = zeros
            elif addr.endswith('::'):
                parts[-2:] = zeros
            else:
                idx = parts.index('')
                parts[idx:idx+1] = zeros

            if len(parts) != 8 - ipv4Component:
                raise ValueError("Syntactically invalid address")
        else:
            if len(parts) != (8 - ipv4Component):
                raise ValueError("Syntactically invalid address")

        if ipv4Component:
            if parts[-1].count('.') != 3:
                raise ValueError("Syntactically invalid address")
            rawipv4 = socket.inet_aton(parts[-1])
            unpackedipv4 = struct.unpack('!HH', rawipv4)
            parts[-1:] = [hex(x)[2:] for x in unpackedipv4]

        parts = [int(x, 16) for x in parts]
        return struct.pack('!8H', *parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')


def inet_ntop(af, addr):
    if af == socket.AF_INET:
        return socket.inet_ntoa(addr)
    elif af == socket.AF_INET6:
        if len(addr) != 16:
            raise ValueError("address length incorrect")
        parts = struct.unpack('!8H', addr)
        curBase = bestBase = None
        for i in range(8):
            if not parts[i]:
                if curBase is None:
                    curBase = i
                    curLen = 0
                curLen += 1
            else:
                if curBase is not None:
                    bestLen = None
                    if bestBase is None or curLen > bestLen:
                        bestBase = curBase
                        bestLen = curLen
                    curBase = None
        if curBase is not None and (bestBase is None or curLen > bestLen):
            bestBase = curBase
            bestLen = curLen
        parts = [hex(x)[2:] for x in parts]
        if bestBase is not None:
            parts[bestBase:bestBase + bestLen] = ['']
        if parts[0] == '':
            parts.insert(0, '')
        if parts[-1] == '':
            parts.insert(len(parts) - 1, '')
        return ':'.join(parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')


try:
    socket.AF_INET6
except AttributeError:
    socket.AF_INET6 = 'AF_INET6'

try:
    socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
    socket.inet_pton = inet_pton
    socket.inet_ntop = inet_ntop


adict = dict

if _PY3:
    # These are actually useless in Python 2 as well, but we need to go
    # through deprecation process there (ticket #5895):
    del adict, inet_pton, inet_ntop


set = set
frozenset = frozenset

try:
    from functools import reduce
except ImportError:
    reduce = reduce


def execfile(filename, globals, locals=None):
    """
    Execute a Python script in the given namespaces.

    Similar to the execfile builtin, but a namespace is mandatory, partly
    because that's a sensible thing to require, and because otherwise we'd
    have to do some frame hacking.

    This is a compatibility implementation for Python 3 porting, to avoid the
    use of the deprecated builtin C{execfile} function.
    """
    if locals is None:
        locals = globals
    fin = open(filename, "rbU")
    try:
        source = fin.read()
    finally:
        fin.close()
    code = compile(source, filename, "exec")
    exec(code, globals, locals)


try:
    cmp = cmp
except NameError:
    def cmp(a, b):
        """
        Compare two objects.

        Returns a negative number if C{a < b}, zero if they are equal, and a
        positive number if C{a > b}.
        """
        if a < b:
            return -1
        elif a == b:
            return 0
        else:
            return 1


def comparable(klass):
    """
    Class decorator that ensures support for the special C{__cmp__} method.

    On Python 2 this does nothing.

    On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
    relying on C{__cmp__} to implement their comparisons.
    """
    # On Python 2, __cmp__ will just work, so no need to add extra methods:
    if not _PY3:
        return klass

    def __eq__(self, other):
        c = self.__cmp__(other)
        if c is NotImplemented:
            return c
        return c == 0

    def __ne__(self, other):
        c = self.__cmp__(other)
        if c is NotImplemented:
            return c
        return c != 0

    def __lt__(self, other):
        c = self.__cmp__(other)
        if c is NotImplemented:
            return c
        return c < 0

    def __le__(self, other):
        c = self.__cmp__(other)
        if c is NotImplemented:
            return c
        return c <= 0

    def __gt__(self, other):
        c = self.__cmp__(other)
        if c is NotImplemented:
            return c
        return c > 0

    def __ge__(self, other):
        c = self.__cmp__(other)
        if c is NotImplemented:
            return c
        return c >= 0

    klass.__lt__ = __lt__
    klass.__gt__ = __gt__
    klass.__le__ = __le__
    klass.__ge__ = __ge__
    klass.__eq__ = __eq__
    klass.__ne__ = __ne__
    return klass


if _PY3:
    unicode = str
    long = int
else:
    unicode = unicode
    long = long


def ioType(fileIshObject, default=unicode):
    """
    Determine the type which will be returned from the given file object's
    read() and accepted by its write() method as an argument.

    In other words, determine whether the given file is 'opened in text mode'.

    @param fileIshObject: Any object, but ideally one which resembles a file.
    @type fileIshObject: L{object}

    @param default: A default value to return when the type of C{fileIshObject}
        cannot be determined.
    @type default: L{type}

    @return: There are 4 possible return values:

        1. L{unicode}, if the file is unambiguously opened in text mode.

        2. L{bytes}, if the file is unambiguously opened in binary mode.

        3. L{basestring}, if we are on python 2 (the L{basestring} type
           does not exist on python 3) and the file is opened in binary
           mode, but has an encoding and can therefore accept both bytes
           and text reliably for writing, but will return L{bytes} from
           read methods.

        4. The C{default} parameter, if the given type is not understood.

    @rtype: L{type}
    """
    if isinstance(fileIshObject, TextIOBase):
        # If it's for text I/O, then it's for text I/O.
        return unicode
    if isinstance(fileIshObject, IOBase):
        # If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
        return bytes
    encoding = getattr(fileIshObject, 'encoding', None)
    import codecs
    if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
        # On StreamReaderWriter, the 'encoding' attribute has special meaning;
        # it is unambiguously unicode.
        if encoding:
            return unicode
        else:
            return bytes
    if not _PY3:
        # Special case: if we have an encoding file, we can *give* it unicode,
        # but we can't expect to *get* unicode.
        if isinstance(fileIshObject, file):
            if encoding is not None:
                return basestring
            else:
                return bytes
        from cStringIO import InputType, OutputType
        from StringIO import StringIO
        if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
            return bytes
    return default


def nativeString(s):
    """
    Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
    encoding if conversion is necessary.

    @raise UnicodeError: The input string is not ASCII encodable/decodable.
    @raise TypeError: The input is neither C{bytes} nor C{unicode}.
    """
    if not isinstance(s, (bytes, unicode)):
        raise TypeError("%r is neither bytes nor unicode" % s)
    if _PY3:
        if isinstance(s, bytes):
            return s.decode("ascii")
        else:
            # Ensure we're limited to ASCII subset:
            s.encode("ascii")
    else:
        if isinstance(s, unicode):
            return s.encode("ascii")
        else:
            # Ensure we're limited to ASCII subset:
            s.decode("ascii")
    return s


def _matchingString(constantString, inputString):
    """
    Some functions, such as C{os.path.join}, operate on string arguments which
    may be bytes or text, and wish to return a value of the same type. In
    those cases you may wish to have a string constant (in the case of
    C{os.path.join}, that constant would be C{os.path.sep}) involved in the
    parsing or processing, that must be of a matching type in order to use
    string operations on it. L{_matchingString} will take a constant string
    (either L{bytes} or L{unicode}) and convert it to the same type as the
    input string. C{constantString} should contain only characters from ASCII;
    to ensure this, it will be encoded or decoded regardless.

    @param constantString: A string literal used in processing.
    @type constantString: L{unicode} or L{bytes}

    @param inputString: A byte string or text string provided by the user.
    @type inputString: L{unicode} or L{bytes}

    @return: C{constantString} converted into the same type as C{inputString}
    @rtype: the type of C{inputString}
    """
    if isinstance(constantString, bytes):
        otherType = constantString.decode("ascii")
    else:
        otherType = constantString.encode("ascii")
    if type(constantString) == type(inputString):
        return constantString
    else:
        return otherType


if _PY3:
    def reraise(exception, traceback):
        raise exception.with_traceback(traceback)
else:
    exec("""def reraise(exception, traceback):
    raise exception.__class__, exception, traceback""")

reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.

Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.

@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""


if _PY3:
    from io import StringIO as NativeStringIO
else:
    from io import BytesIO as NativeStringIO


# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
    def iterbytes(originalBytes):
        for i in range(len(originalBytes)):
            yield originalBytes[i:i+1]

    def intToBytes(i):
        return ("%d" % i).encode("ascii")

    # Ideally we would use memoryview, but it has a number of differences from
    # the Python 2 buffer() that make that impractical
    # (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
    # PyArg_ParseTuple differences.)
    def lazyByteSlice(object, offset=0, size=None):
        """
        Return a copy of the given bytes-like object.

        If an offset is given, the copy starts at that offset. If a size is
        given, the copy will only be of that length.

        @param object: C{bytes} to be copied.

        @param offset: C{int}, starting index of copy.

        @param size: Optional, if an C{int} is given limit the length of copy
            to this size.
        """
        if size is None:
            return object[offset:]
        else:
            return object[offset:(offset + size)]

    def networkString(s):
        if not isinstance(s, unicode):
            raise TypeError("Can only convert text to bytes on Python 3")
        return s.encode('ascii')
else:
    def iterbytes(originalBytes):
        return originalBytes

    def intToBytes(i):
        return b"%d" % i

    lazyByteSlice = buffer

    def networkString(s):
        if not isinstance(s, str):
            raise TypeError("Can only pass-through bytes on Python 2")
        # Ensure we're limited to ASCII subset:
        s.decode('ascii')
        return s

iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.

In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).

@param originalBytes: A C{bytes} object that will be wrapped.
"""

intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as its ASCII-encoded decimal
representation.

In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.

@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""

networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.

This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:

    networkString("Hello %d" % (n,))

@param s: A native string to convert to bytes if necessary.
@type s: C{str}

@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.

@rtype: C{bytes}
"""


try:
    StringType = basestring
except NameError:
    # Python 3+
    StringType = str

try:
    from types import InstanceType
except ImportError:
    # Python 3+
    InstanceType = object

try:
    from types import FileType
except ImportError:
    # Python 3+
    FileType = IOBase


if _PY3:
    import urllib.parse as urllib_parse
    from html import escape
    from urllib.parse import quote as urlquote
    from urllib.parse import unquote as urlunquote
    from http import cookiejar as cookielib
else:
    import urlparse as urllib_parse
    from cgi import escape
    from urllib import quote as urlquote
    from urllib import unquote as urlunquote
    import cookielib


# Dealing with the differences in items/iteritems
if _PY3:
    def iteritems(d):
        return d.items()

    def itervalues(d):
        return d.values()

    def items(d):
        return list(d.items())

    xrange = range
    izip = zip
else:
    def iteritems(d):
        return d.iteritems()

    def itervalues(d):
        return d.itervalues()

    def items(d):
        return d.items()

    xrange = xrange
    from itertools import izip
    izip  # shh pyflakes

iteritems.__doc__ = """
Return an iterable of the items of C{d}.

@type d: L{dict}
@rtype: iterable
"""

itervalues.__doc__ = """
Return an iterable of the values of C{d}.

@type d: L{dict}
@rtype: iterable
"""

items.__doc__ = """
Return a list of the items of C{d}.

@type d: L{dict}
@rtype: L{list}
"""


def _keys(d):
    """
    Return a list of the keys of C{d}.

    @type d: L{dict}
    @rtype: L{list}
    """
    if _PY3:
        return list(d.keys())
    else:
        return d.keys()


def bytesEnviron():
    """
    Return a L{dict} of L{os.environ} where all text-strings are encoded into
    L{bytes}.
    """
    if not _PY3:
        # On py2, nothing to do.
        return dict(os.environ)

    target = dict()
    for x, y in os.environ.items():
        target[os.environ.encodekey(x)] = os.environ.encodevalue(y)
    return target


def _constructMethod(cls, name, self):
    """
    Construct a bound method.

    @param cls: The class that the method should be bound to.
    @type cls: L{types.ClassType} or L{type}.

    @param name: The name of the method.
    @type name: native L{str}

    @param self: The object that the method is bound to.
    @type self: any object

    @return: a bound method
    @rtype: L{types.MethodType}
    """
    func = cls.__dict__[name]
    if _PY3:
        return _MethodType(func, self)
    return _MethodType(func, self, cls)


from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute

from collections import OrderedDict

deprecatedModuleAttribute(
    Version("Twisted", 15, 5, 0),
    "Use collections.OrderedDict instead.",
    "twisted.python.compat",
    "OrderedDict")

if _PY3:
    from base64 import encodebytes as _b64encodebytes
    from base64 import decodebytes as _b64decodebytes
else:
    from base64 import encodestring as _b64encodebytes
    from base64 import decodestring as _b64decodebytes


def _bytesChr(i):
    """
    Like L{chr} but always works on ASCII, returning L{bytes}.

    @param i: The ASCII code point to return.
    @type i: L{int}

    @rtype: L{bytes}
    """
    if _PY3:
        return bytes([i])
    else:
        return chr(i)


__all__ = [
    "reraise",
    "execfile",
    "frozenset",
    "reduce",
    "set",
    "cmp",
    "comparable",
    "OrderedDict",
    "nativeString",
    "NativeStringIO",
    "networkString",
    "unicode",
    "iterbytes",
    "intToBytes",
    "lazyByteSlice",
    "StringType",
    "InstanceType",
    "FileType",
    "items",
    "iteritems",
    "itervalues",
    "xrange",
    "urllib_parse",
    "bytesEnviron",
    "escape",
    "urlquote",
    "urlunquote",
    "cookielib",
    "_keys",
    "_b64encodebytes",
    "_b64decodebytes",
    "_bytesChr",
]
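
The comparable decorator above is easiest to understand through use. A minimal sketch, assuming Twisted (which ships this module as twisted.python.compat) is importable; comparable and cmp are real names exported in __all__ above, while the Release class and file name are made up for the demo.

# comparable_sketch.py -- standalone illustration, not part of compat.py
from twisted.python.compat import comparable, cmp

@comparable
class Release(object):
    """Ordered purely by its 'number' attribute, via a single __cmp__."""
    def __init__(self, number):
        self.number = number

    def __cmp__(self, other):
        if not isinstance(other, Release):
            return NotImplemented
        return cmp(self.number, other.number)

if __name__ == '__main__':
    # On Python 3 the decorator synthesizes __lt__, __ge__, etc. from __cmp__;
    # on Python 2 it is a no-op because __cmp__ works natively.
    assert Release(1) < Release(2)
    assert Release(2) >= Release(2)
    assert Release(3) != Release(1)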
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def currentframe(n=0):
"""
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@type n: L{int}
@return: a frame, n levels up the stack from the caller.
@rtype: L{types.FrameType}
"""
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
if [x for x in addr if x not in string.hexdigits + ':.']:
raise ValueError("Illegal characters: %r" % (''.join(x),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
bestLen = None
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
if _PY3:
unicode = str
long = int
else:
unicode = unicode
long = long
def ioType(fileIshObject, default=unicode):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{unicode}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. L{basestring}, if we are on python 2 (the L{basestring} type
does not exist on python 3) and the file is opened in binary
mode, but has an encoding and can therefore accept both bytes
and text reliably for writing, but will return L{bytes} from
read methods.
4. The C{default} parameter, if the given type is not understood.
@rtype: L{type}
"""
if isinstance(fileIshObject, TextIOBase):
# If it's for text I/O, then it's for text I/O.
return unicode
if isinstance(fileIshObject, IOBase):
# If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
return bytes
encoding = getattr(fileIshObject, 'encoding', None)
import codecs
if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
# On StreamReaderWriter, the 'encoding' attribute has special meaning;
# it is unambiguously unicode.
if encoding:
return unicode
else:
return bytes
if not _PY3:
# Special case: if we have an encoding file, we can *give* it unicode,
# but we can't expect to *get* unicode.
if isinstance(fileIshObject, file):
if encoding is not None:
return basestring
else:
return bytes
from cStringIO import InputType, OutputType
from StringIO import StringIO
if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
return bytes
return default
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{unicode}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{unicode} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{unicode} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def
|
(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
# (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
If an offset is given, the copy starts at that offset. If a size is
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arab numeral.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
# Python 3+
FileType = IOBase
if _PY3:
import urllib.parse as urllib_parse
from html import escape
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from http import cookiejar as cookielib
else:
import urlparse as urllib_parse
from cgi import escape
from urllib import quote as urlquote
from urllib import unquote as urlunquote
import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
def items(d):
return list(d.items())
xrange = range
izip = zip
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def items(d):
return d.items()
xrange = xrange
from itertools import izip
izip # shh pyflakes
iteritems.__doc__ = """
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
itervalues.__doc__ = """
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
items.__doc__ = """
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
"""
Return a list of the keys of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
if _PY3:
return list(d.keys())
else:
return d.keys()
def bytesEnviron():
"""
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
"""
if not _PY3:
# On py2, nothing to do.
return dict(os.environ)
target = dict()
for x, y in os.environ.items():
target[os.environ.encodekey(x)] = os.environ.encodevalue(y)
return target
def _constructMethod(cls, name, self):
"""
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{types.ClassType} or L{type}.
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{types.MethodType}
"""
func = cls.__dict__[name]
if _PY3:
return _MethodType(func, self)
return _MethodType(func, self, cls)
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.compat",
"OrderedDict")
if _PY3:
from base64 import encodebytes as _b64encodebytes
from base64 import decodebytes as _b64decodebytes
else:
from base64 import encodestring as _b64encodebytes
from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
"""
Like L{chr} but always works on ASCII, returning L{bytes}.
@param i: The ASCII code point to return.
@type i: L{int}
@rtype: L{bytes}
"""
if _PY3:
return bytes([i])
else:
return chr(i)
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"OrderedDict",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
"items",
"iteritems",
"itervalues",
"xrange",
"urllib_parse",
"bytesEnviron",
"escape",
"urlquote",
"urlunquote",
"cookielib",
"_keys",
"_b64encodebytes",
"_b64decodebytes",
"_bytesChr",
]
|
iterbytes
|
identifier_name
|
compat.py
|
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def currentframe(n=0):
"""
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@type n: L{int}
@return: a frame, n levels up the stack from the caller.
@rtype: L{types.FrameType}
"""
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
if [x for x in addr if x not in string.hexdigits + ':.']:
raise ValueError("Illegal characters: %r" % (''.join(x),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
bestLen = None
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
|
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
if _PY3:
unicode = str
long = int
else:
unicode = unicode
long = long
def ioType(fileIshObject, default=unicode):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{unicode}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. L{basestring}, if we are on python 2 (the L{basestring} type
does not exist on python 3) and the file is opened in binary
mode, but has an encoding and can therefore accept both bytes
and text reliably for writing, but will return L{bytes} from
read methods.
4. The C{default} parameter, if the given type is not understood.
@rtype: L{type}
"""
if isinstance(fileIshObject, TextIOBase):
# If it's for text I/O, then it's for text I/O.
return unicode
if isinstance(fileIshObject, IOBase):
# If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
return bytes
encoding = getattr(fileIshObject, 'encoding', None)
import codecs
if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
# On StreamReaderWriter, the 'encoding' attribute has special meaning;
# it is unambiguously unicode.
if encoding:
return unicode
else:
return bytes
if not _PY3:
# Special case: if we have an encoding file, we can *give* it unicode,
# but we can't expect to *get* unicode.
if isinstance(fileIshObject, file):
if encoding is not None:
return basestring
else:
return bytes
from cStringIO import InputType, OutputType
from StringIO import StringIO
if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
return bytes
return default
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{unicode}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{unicode} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{unicode} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
# (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
If an offset is given, the copy starts at that offset. If a size is
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arab numeral.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
# Python 3+
FileType = IOBase
if _PY3:
import urllib.parse as urllib_parse
from html import escape
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from http import cookiejar as cookielib
else:
import urlparse as urllib_parse
from cgi import escape
from urllib import quote as urlquote
from urllib import unquote as urlunquote
import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
def items(d):
return list(d.items())
xrange = range
izip = zip
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def items(d):
return d.items()
xrange = xrange
from itertools import izip
izip # shh pyflakes
iteritems.__doc__ = """
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
itervalues.__doc__ = """
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
items.__doc__ = """
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
"""
Return a list of the keys of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
if _PY3:
return list(d.keys())
else:
return d.keys()
def bytesEnviron():
"""
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
"""
if not _PY3:
# On py2, nothing to do.
return dict(os.environ)
target = dict()
for x, y in os.environ.items():
target[os.environ.encodekey(x)] = os.environ.encodevalue(y)
return target
def _constructMethod(cls, name, self):
"""
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{types.ClassType} or L{type}.
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{types.MethodType}
"""
func = cls.__dict__[name]
if _PY3:
return _MethodType(func, self)
return _MethodType(func, self, cls)
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.compat",
"OrderedDict")
if _PY3:
from base64 import encodebytes as _b64encodebytes
from base64 import decodebytes as _b64decodebytes
else:
from base64 import encodestring as _b64encodebytes
from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
"""
Like L{chr} but always works on ASCII, returning L{bytes}.
@param i: The ASCII code point to return.
@type i: L{int}
@rtype: L{bytes}
"""
if _PY3:
return bytes([i])
else:
return chr(i)
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"OrderedDict",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
"items",
"iteritems",
"itervalues",
"xrange",
"urllib_parse",
"bytesEnviron",
"escape",
"urlquote",
"urlunquote",
"cookielib",
"_keys",
"_b64encodebytes",
"_b64decodebytes",
"_bytesChr",
]
|
raise socket.error(97, 'Address family not supported by protocol')
|
conditional_block
|
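Each record in this dump splits one source file into prefix, suffix, and middle fields plus a fim_type label (conditional_block, random_line_split, identifier_name, identifier_body). Below is a minimal sketch of how a record recombines into the original file, assuming the fields are available as plain strings; the dictionary is a hypothetical stand-in for one row, not actual dataset access code:

def reassemble_fim_record(prefix, middle, suffix):
    """Recombine a fill-in-the-middle record into the original source text.

    The dataset stores each file as prefix + [masked middle] + suffix,
    so plain concatenation restores it.
    """
    return prefix + middle + suffix

# Hypothetical usage with the conditional_block record above:
record = {
    "prefix": "...code before the masked span...",   # placeholder
    "middle": "raise socket.error(97, 'Address family not supported by protocol')",
    "suffix": "...code after the masked span...",    # placeholder
    "fim_type": "conditional_block",
}
restored = reassemble_fim_record(record["prefix"], record["middle"], record["suffix"])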
compat.py
|
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def currentframe(n=0):
"""
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@type n: L{int}
@return: a frame, n levels up the stack from the caller.
@rtype: L{types.FrameType}
"""
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
bad = [x for x in addr if x not in string.hexdigits + ':.']
if bad:
    raise ValueError("Illegal characters: %r" % (''.join(bad),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
    if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
if _PY3:
unicode = str
long = int
else:
unicode = unicode
long = long
def ioType(fileIshObject, default=unicode):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{unicode}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. L{basestring}, if we are on python 2 (the L{basestring} type
does not exist on python 3) and the file is opened in binary
mode, but has an encoding and can therefore accept both bytes
and text reliably for writing, but will return L{bytes} from
read methods.
4. The C{default} parameter, if the given type is not understood.
@rtype: L{type}
"""
if isinstance(fileIshObject, TextIOBase):
# If it's for text I/O, then it's for text I/O.
return unicode
if isinstance(fileIshObject, IOBase):
# If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
return bytes
encoding = getattr(fileIshObject, 'encoding', None)
import codecs
if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
# On StreamReaderWriter, the 'encoding' attribute has special meaning;
# it is unambiguously unicode.
if encoding:
return unicode
else:
return bytes
if not _PY3:
# Special case: if we have an encoding file, we can *give* it unicode,
# but we can't expect to *get* unicode.
if isinstance(fileIshObject, file):
if encoding is not None:
return basestring
else:
return bytes
from cStringIO import InputType, OutputType
from StringIO import StringIO
if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
return bytes
return default
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{unicode}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{unicode} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{unicode} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
# (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
|
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as an ASCII-encoded Arabic numeral.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
# Python 3+
FileType = IOBase
if _PY3:
import urllib.parse as urllib_parse
from html import escape
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from http import cookiejar as cookielib
else:
import urlparse as urllib_parse
from cgi import escape
from urllib import quote as urlquote
from urllib import unquote as urlunquote
import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
def items(d):
return list(d.items())
xrange = range
izip = zip
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def items(d):
return d.items()
xrange = xrange
from itertools import izip
izip # shh pyflakes
iteritems.__doc__ = """
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
itervalues.__doc__ = """
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
items.__doc__ = """
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
"""
Return a list of the keys of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
if _PY3:
return list(d.keys())
else:
return d.keys()
def bytesEnviron():
"""
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
"""
if not _PY3:
# On py2, nothing to do.
return dict(os.environ)
target = dict()
for x, y in os.environ.items():
target[os.environ.encodekey(x)] = os.environ.encodevalue(y)
return target
def _constructMethod(cls, name, self):
"""
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{types.ClassType} or L{type}.
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{types.MethodType}
"""
func = cls.__dict__[name]
if _PY3:
return _MethodType(func, self)
return _MethodType(func, self, cls)
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.compat",
"OrderedDict")
if _PY3:
from base64 import encodebytes as _b64encodebytes
from base64 import decodebytes as _b64decodebytes
else:
from base64 import encodestring as _b64encodebytes
from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
"""
Like L{chr} but always works on ASCII, returning L{bytes}.
@param i: The ASCII code point to return.
@type i: L{int}
@rtype: L{bytes}
"""
if _PY3:
return bytes([i])
else:
return chr(i)
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"OrderedDict",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
"items",
"iteritems",
"itervalues",
"xrange",
"urllib_parse",
"bytesEnviron",
"escape",
"urlquote",
"urlunquote",
"cookielib",
"_keys",
"_b64encodebytes",
"_b64decodebytes",
"_bytesChr",
]
|
If an offset is given, the copy starts at that offset. If a size is
|
random_line_split
|
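A short sketch of how the byte-handling helpers defined in this compat module behave on Python 3; it assumes twisted.python.compat is importable, and the expected values follow directly from the definitions above:

from twisted.python.compat import intToBytes, iterbytes, nativeString, networkString

assert intToBytes(42) == b"42"                         # ASCII-encoded numeral
assert list(iterbytes(b"ab")) == [b"a", b"b"]          # one-byte slices, as on Python 2
assert nativeString(b"hello") == "hello"               # bytes -> native str (ASCII only)
assert networkString("Hello %d" % (7,)) == b"Hello 7"  # text -> bytes for the wire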
setup.py
|
"""setuptools based packaging and installation module.
Defines the project properties, as well as a special command to build a
standalone executable, by using PyInstaller.
Run with --help to see available options.
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import sys
import distutils
import subprocess
from amtt.version import __version__ as amtt_version
here = path.abspath(path.dirname(__file__))
class BuildStandaloneExeCommand(distutils.cmd.Command):
"""
Custom command to build standalone executable using PyInstaller.
Invoke by executing:
python setup.py build_standalone
"""
description = 'build standalone executable with PyInstaller'
user_options = []
def initialize_options(self):
"""Set default values for user options."""
def finalize_options(self):
"""Post-process user options."""
def
|
(self):
"""Run command."""
sep = ';' if sys.platform == 'win32' else ':'
command = ' '.join([
'pyinstaller',
' --onefile',
' --add-data amtt/ui/icon64x64.png{sep}amtt/ui'.format(sep=sep),
' --add-data amtt/ui/icon64x64.gif{sep}amtt/ui'.format(sep=sep),
' --add-data amtt/exporter/isograph/emitter/xml/template-2.1.xml'
'{sep}amtt/exporter/isograph/emitter/xml'.format(sep=sep),
' --hidden-import pyexcel_xls.xls'
' amtt/main.py',
' -i resources/icon.ico',
' -n amtt_{plat}-{ver}'.format(plat=sys.platform,
ver=amtt_version),
])
self.announce('Building standalone executable with PyInstaller',
level=distutils.log.INFO)
subprocess.check_call(command, shell=True)
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='amtt',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=amtt_version,
description='Availability Modelling Translation Toolkit',
long_description=long_description,
# The project's main homepage.
url='https://github.com/errikos/amtt',
# Author details
author='Ergys Dona',
author_email='[email protected]',
# Choose your license
license='GPL-3.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='availability engineering model translation toolkit',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['docs']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'networkx',
'pydotplus',
'pyexcel',
'pyexcel-xls',
'sliding-window',
'lxml',
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'amtt.ui': ['icon64x64.png', 'icon64x64.gif'],
'amtt.exporter.isograph.emitter.xml': ['template-2.1.xml'],
},
# List additional groups of dependencies here (e.g. documentation
# dependencies). You can install these using the following syntax:
# $ pip install -e .[docs]
extras_require={
'docs': ['Sphinx', 'sphinx-rtd-theme'],
'build': ['PyInstaller'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'amtt=amtt.main:main',
'amtt-gui=amtt.main:ui_main',
],
},
# Provide custom command for building standalone executable
cmdclass={
'build_standalone': BuildStandaloneExeCommand,
},
)
|
run
|
identifier_name
|
setup.py
|
"""setuptools based packaging and installation module.
Defines the project properties, as well as a special command to build a
standalone executable, by using PyInstaller.
Run with --help to see available options.
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import sys
import distutils
import subprocess
from amtt.version import __version__ as amtt_version
here = path.abspath(path.dirname(__file__))
class BuildStandaloneExeCommand(distutils.cmd.Command):
|
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='amtt',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=amtt_version,
description='Availability Modelling Translation Toolkit',
long_description=long_description,
# The project's main homepage.
url='https://github.com/errikos/amtt',
# Author details
author='Ergys Dona',
author_email='[email protected]',
# Choose your license
license='GPL-3.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='availability engineering model translation toolkit',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['docs']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'networkx',
'pydotplus',
'pyexcel',
'pyexcel-xls',
'sliding-window',
'lxml',
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'amtt.ui': ['icon64x64.png', 'icon64x64.gif'],
'amtt.exporter.isograph.emitter.xml': ['template-2.1.xml'],
},
# List additional groups of dependencies here (e.g. documentation
# dependencies). You can install these using the following syntax:
# $ pip install -e .[docs]
extras_require={
'docs': ['Sphinx', 'sphinx-rtd-theme'],
'build': ['PyInstaller'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'amtt=amtt.main:main',
'amtt-gui=amtt.main:ui_main',
],
},
# Provide custom command for building standalone executable
cmdclass={
'build_standalone': BuildStandaloneExeCommand,
},
)
|
"""
Custom command to build standalone executable using PyInstaller.
Invoke by executing:
python setup.py build_standalone
"""
description = 'build standalone executable with PyInstaller'
user_options = []
def initialize_options(self):
"""Set default values for user options."""
def finalize_options(self):
"""Post-process user options."""
def run(self):
"""Run command."""
sep = ';' if sys.platform == 'win32' else ':'
command = ' '.join([
'pyinstaller',
' --onefile',
' --add-data amtt/ui/icon64x64.png{sep}amtt/ui'.format(sep=sep),
' --add-data amtt/ui/icon64x64.gif{sep}amtt/ui'.format(sep=sep),
' --add-data amtt/exporter/isograph/emitter/xml/template-2.1.xml'
'{sep}amtt/exporter/isograph/emitter/xml'.format(sep=sep),
' --hidden-import pyexcel_xls.xls'
' amtt/main.py',
' -i resources/icon.ico',
' -n amtt_{plat}-{ver}'.format(plat=sys.platform,
ver=amtt_version),
])
self.announce('Building standalone executable with PyInstaller',
level=distutils.log.INFO)
subprocess.check_call(command, shell=True)
|
identifier_body
|
setup.py
|
"""setuptools based packaging and installation module.
|
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import sys
import distutils
import subprocess
from amtt.version import __version__ as amtt_version
here = path.abspath(path.dirname(__file__))
class BuildStandaloneExeCommand(distutils.cmd.Command):
"""
Custom command to build standalone executable using PyInstaller.
Invoke by executing:
python setup.py build_standalone
"""
description = 'build standalone executable with PyInstaller'
user_options = []
def initialize_options(self):
"""Set default values for user options."""
def finalize_options(self):
"""Post-process user options."""
def run(self):
"""Run command."""
sep = ';' if sys.platform == 'win32' else ':'
command = ' '.join([
'pyinstaller',
' --onefile',
' --add-data amtt/ui/icon64x64.png{sep}amtt/ui'.format(sep=sep),
' --add-data amtt/ui/icon64x64.gif{sep}amtt/ui'.format(sep=sep),
' --add-data amtt/exporter/isograph/emitter/xml/template-2.1.xml'
'{sep}amtt/exporter/isograph/emitter/xml'.format(sep=sep),
' --hidden-import pyexcel_xls.xls'
' amtt/main.py',
' -i resources/icon.ico',
' -n amtt_{plat}-{ver}'.format(plat=sys.platform,
ver=amtt_version),
])
self.announce('Building standalone executable with PyInstaller',
level=distutils.log.INFO)
subprocess.check_call(command, shell=True)
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='amtt',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=amtt_version,
description='Availability Modelling Translation Toolkit',
long_description=long_description,
# The project's main homepage.
url='https://github.com/errikos/amtt',
# Author details
author='Ergys Dona',
author_email='[email protected]',
# Choose your license
license='GPL-3.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='availability engineering model translation toolkit',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['docs']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'networkx',
'pydotplus',
'pyexcel',
'pyexcel-xls',
'sliding-window',
'lxml',
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'amtt.ui': ['icon64x64.png', 'icon64x64.gif'],
'amtt.exporter.isograph.emitter.xml': ['template-2.1.xml'],
},
# List additional groups of dependencies here (e.g. documentation
# dependencies). You can install these using the following syntax:
# $ pip install -e .[docs]
extras_require={
'docs': ['Sphinx', 'sphinx-rtd-theme'],
'build': ['PyInstaller'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'amtt=amtt.main:main',
'amtt-gui=amtt.main:ui_main',
],
},
# Provide custom command for building standalone executable
cmdclass={
'build_standalone': BuildStandaloneExeCommand,
},
)
|
Defines the project properties, as well as a special command to build a
standalone executable, by using PyInstaller.
Run with --help to see available options.
|
random_line_split
|
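The platform-dependent sep above exists because PyInstaller's --add-data option separates source and destination with ';' on Windows and ':' elsewhere. A small sketch of building such a flag, mirroring what the custom command assembles; the paths are taken from the command above:

import sys

def add_data_flag(src, dest):
    """Build a PyInstaller --add-data argument with the platform separator."""
    sep = ';' if sys.platform == 'win32' else ':'
    return '--add-data {src}{sep}{dest}'.format(src=src, sep=sep, dest=dest)

# On Linux this prints: --add-data amtt/ui/icon64x64.png:amtt/ui
print(add_data_flag('amtt/ui/icon64x64.png', 'amtt/ui'))

The command class itself is invoked as python setup.py build_standalone, per its docstring.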
main.py
|
# My files
from handlers import MainPage
from handlers import WelcomePage
from handlers import SignUpPage
from handlers import SignIn
from handlers import SignOut
from handlers import NewPost
from handlers import EditPost
from handlers import DeletePost
from handlers import SinglePost
from handlers import LikePost
from handlers import DislikePost
|
import webapp2
app = webapp2.WSGIApplication([
('/', MainPage),
('/signup', SignUpPage),
('/welcome', WelcomePage),
('/post/([0-9]+)', SinglePost),
('/new-post', NewPost),
('/edit-post/([0-9]+)', EditPost),
('/delete-post', DeletePost),
('/like-post', LikePost),
('/dislike-post', DislikePost),
('/edit-comment', EditComment),
('/delete-comment', DeleteComment),
('/login', SignIn),
('/logout', SignOut)
], debug=True)
|
from handlers import EditComment
from handlers import DeleteComment
|
random_line_split
|
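In the route table above, webapp2 passes each regex capture group, such as the ([0-9]+) in '/post/([0-9]+)', to the matched handler method as a positional argument. A minimal sketch of a handler written against that convention; the body is illustrative, not the project's actual SinglePost:

import webapp2

class SinglePost(webapp2.RequestHandler):
    def get(self, post_id):
        # post_id receives the '([0-9]+)' capture group from the route.
        self.response.write('post id: %s' % post_id)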
new-note.directive.js
|
(function(){
angular.module('app')
.directive('newNote', function()
{
return {
templateUrl: "new-note.html",
scope: {
notes: '='
},
controller: NewNoteController
};
function
|
($scope, NoteService)
{
$scope.blankNote = null;
$scope.createNote = createNote;
$scope.saveNote = saveNote;
function createNote()
{
$scope.blankNote = NoteService.createBlankNote();
}
function saveNote()
{
if ($scope.blankNote && ($scope.blankNote.title.length > 0 || $scope.blankNote.content.length > 0))
{
NoteService.saveNote($scope.blankNote).then(function(savedNote)
{
$scope.notes.unshift(savedNote);
});
}
$scope.blankNote = null;
}
}
})
;
})();
|
NewNoteController
|
identifier_name
|
new-note.directive.js
|
(function(){
angular.module('app')
.directive('newNote', function()
{
return {
templateUrl: "new-note.html",
scope: {
notes: '='
},
controller: NewNoteController
};
function NewNoteController($scope, NoteService)
{
$scope.blankNote = null;
$scope.createNote = createNote;
$scope.saveNote = saveNote;
function createNote()
{
$scope.blankNote = NoteService.createBlankNote();
}
function saveNote()
{
if ($scope.blankNote && ($scope.blankNote.title.length > 0 || $scope.blankNote.content.length > 0))
|
});
}
$scope.blankNote = null;
}
}
})
;
})();
|
{
NoteService.saveNote($scope.blankNote).then(function(savedNote)
{
$scope.notes.unshift(savedNote);
|
random_line_split
|
new-note.directive.js
|
(function(){
angular.module('app')
.directive('newNote', function()
{
return {
templateUrl: "new-note.html",
scope: {
notes: '='
},
controller: NewNoteController
};
function NewNoteController($scope, NoteService)
{
$scope.blankNote = null;
$scope.createNote = createNote;
$scope.saveNote = saveNote;
function createNote()
{
$scope.blankNote = NoteService.createBlankNote();
}
function saveNote()
{
if ($scope.blankNote && ($scope.blankNote.title.length > 0 || $scope.blankNote.content.length > 0))
|
$scope.blankNote = null;
}
}
})
;
})();
|
{
NoteService.saveNote($scope.blankNote).then(function(savedNote)
{
$scope.notes.unshift(savedNote);
});
}
|
conditional_block
|
new-note.directive.js
|
(function(){
angular.module('app')
.directive('newNote', function()
{
return {
templateUrl: "new-note.html",
scope: {
notes: '='
},
controller: NewNoteController
};
function NewNoteController($scope, NoteService)
|
})
;
})();
|
{
$scope.blankNote = null;
$scope.createNote = createNote;
$scope.saveNote = saveNote;
function createNote()
{
$scope.blankNote = NoteService.createBlankNote();
}
function saveNote()
{
if ($scope.blankNote && ($scope.blankNote.title.length > 0 || $scope.blankNote.content.length > 0))
{
NoteService.saveNote($scope.blankNote).then(function(savedNote)
{
$scope.notes.unshift(savedNote);
});
}
$scope.blankNote = null;
}
}
|
identifier_body
|
forceListMetadata.ts
|
/*
* Copyright (c) 2019, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
import {
CliCommandExecution,
CliCommandExecutor,
Command,
CommandOutput,
SfdxCommandBuilder
} from '@salesforce/salesforcedx-utils-vscode/out/src/cli';
import * as fs from 'fs';
import { SfdxCommandletExecutor } from '../commands/util';
import { getRootWorkspacePath } from '../util';
export class ForceListMetadataExecutor extends SfdxCommandletExecutor<string> {
private metadataType: string;
private defaultUsernameOrAlias: string;
private folder?: string;
public constructor(
metadataType: string,
defaultUsernameOrAlias: string,
folder?: string
) {
super();
this.metadataType = metadataType;
this.defaultUsernameOrAlias = defaultUsernameOrAlias;
this.folder = folder;
}
public build(data: {}): Command {
const builder = new SfdxCommandBuilder()
.withArg('force:mdapi:listmetadata')
.withFlag('-m', this.metadataType)
.withFlag('-u', this.defaultUsernameOrAlias)
.withLogName('force_mdapi_listmetadata')
.withJson();
if (this.folder) {
builder.withFlag('--folder', this.folder);
}
return builder.build();
}
|
const startTime = process.hrtime();
const execution = new CliCommandExecutor(this.build({}), {
cwd: getRootWorkspacePath()
}).execute();
execution.processExitSubject.subscribe(() => {
this.logMetric(execution.command.logName, startTime);
});
return execution;
}
}
export async function forceListMetadata(
metadataType: string,
defaultUsernameOrAlias: string,
outputPath: string,
folder?: string
): Promise<string> {
const forceListMetadataExecutor = new ForceListMetadataExecutor(
metadataType,
defaultUsernameOrAlias,
folder
);
const execution = forceListMetadataExecutor.execute();
const cmdOutput = new CommandOutput();
const result = await cmdOutput.getCmdResult(execution);
fs.writeFileSync(outputPath, result);
return result;
}
|
public execute(): CliCommandExecution {
|
random_line_split
|
forceListMetadata.ts
|
/*
* Copyright (c) 2019, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
import {
CliCommandExecution,
CliCommandExecutor,
Command,
CommandOutput,
SfdxCommandBuilder
} from '@salesforce/salesforcedx-utils-vscode/out/src/cli';
import * as fs from 'fs';
import { SfdxCommandletExecutor } from '../commands/util';
import { getRootWorkspacePath } from '../util';
export class ForceListMetadataExecutor extends SfdxCommandletExecutor<string> {
private metadataType: string;
private defaultUsernameOrAlias: string;
private folder?: string;
public constructor(
metadataType: string,
defaultUsernameOrAlias: string,
folder?: string
) {
super();
this.metadataType = metadataType;
this.defaultUsernameOrAlias = defaultUsernameOrAlias;
this.folder = folder;
}
public build(data: {}): Command {
const builder = new SfdxCommandBuilder()
.withArg('force:mdapi:listmetadata')
.withFlag('-m', this.metadataType)
.withFlag('-u', this.defaultUsernameOrAlias)
.withLogName('force_mdapi_listmetadata')
.withJson();
if (this.folder)
|
return builder.build();
}
public execute(): CliCommandExecution {
const startTime = process.hrtime();
const execution = new CliCommandExecutor(this.build({}), {
cwd: getRootWorkspacePath()
}).execute();
execution.processExitSubject.subscribe(() => {
this.logMetric(execution.command.logName, startTime);
});
return execution;
}
}
export async function forceListMetadata(
metadataType: string,
defaultUsernameOrAlias: string,
outputPath: string,
folder?: string
): Promise<string> {
const forceListMetadataExecutor = new ForceListMetadataExecutor(
metadataType,
defaultUsernameOrAlias,
folder
);
const execution = forceListMetadataExecutor.execute();
const cmdOutput = new CommandOutput();
const result = await cmdOutput.getCmdResult(execution);
fs.writeFileSync(outputPath, result);
return result;
}
|
{
builder.withFlag('--folder', this.folder);
}
|
conditional_block
|
forceListMetadata.ts
|
/*
* Copyright (c) 2019, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
import {
CliCommandExecution,
CliCommandExecutor,
Command,
CommandOutput,
SfdxCommandBuilder
} from '@salesforce/salesforcedx-utils-vscode/out/src/cli';
import * as fs from 'fs';
import { SfdxCommandletExecutor } from '../commands/util';
import { getRootWorkspacePath } from '../util';
export class ForceListMetadataExecutor extends SfdxCommandletExecutor<string> {
private metadataType: string;
private defaultUsernameOrAlias: string;
private folder?: string;
public constructor(
metadataType: string,
defaultUsernameOrAlias: string,
folder?: string
) {
super();
this.metadataType = metadataType;
this.defaultUsernameOrAlias = defaultUsernameOrAlias;
this.folder = folder;
}
public build(data: {}): Command {
const builder = new SfdxCommandBuilder()
.withArg('force:mdapi:listmetadata')
.withFlag('-m', this.metadataType)
.withFlag('-u', this.defaultUsernameOrAlias)
.withLogName('force_mdapi_listmetadata')
.withJson();
if (this.folder) {
builder.withFlag('--folder', this.folder);
}
return builder.build();
}
public
|
(): CliCommandExecution {
const startTime = process.hrtime();
const execution = new CliCommandExecutor(this.build({}), {
cwd: getRootWorkspacePath()
}).execute();
execution.processExitSubject.subscribe(() => {
this.logMetric(execution.command.logName, startTime);
});
return execution;
}
}
export async function forceListMetadata(
metadataType: string,
defaultUsernameOrAlias: string,
outputPath: string,
folder?: string
): Promise<string> {
const forceListMetadataExecutor = new ForceListMetadataExecutor(
metadataType,
defaultUsernameOrAlias,
folder
);
const execution = forceListMetadataExecutor.execute();
const cmdOutput = new CommandOutput();
const result = await cmdOutput.getCmdResult(execution);
fs.writeFileSync(outputPath, result);
return result;
}
|
execute
|
identifier_name
|
forceListMetadata.ts
|
/*
* Copyright (c) 2019, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
import {
CliCommandExecution,
CliCommandExecutor,
Command,
CommandOutput,
SfdxCommandBuilder
} from '@salesforce/salesforcedx-utils-vscode/out/src/cli';
import * as fs from 'fs';
import { SfdxCommandletExecutor } from '../commands/util';
import { getRootWorkspacePath } from '../util';
export class ForceListMetadataExecutor extends SfdxCommandletExecutor<string> {
private metadataType: string;
private defaultUsernameOrAlias: string;
private folder?: string;
public constructor(
metadataType: string,
defaultUsernameOrAlias: string,
folder?: string
) {
super();
this.metadataType = metadataType;
this.defaultUsernameOrAlias = defaultUsernameOrAlias;
this.folder = folder;
}
public build(data: {}): Command {
const builder = new SfdxCommandBuilder()
.withArg('force:mdapi:listmetadata')
.withFlag('-m', this.metadataType)
.withFlag('-u', this.defaultUsernameOrAlias)
.withLogName('force_mdapi_listmetadata')
.withJson();
if (this.folder) {
builder.withFlag('--folder', this.folder);
}
return builder.build();
}
public execute(): CliCommandExecution {
const startTime = process.hrtime();
const execution = new CliCommandExecutor(this.build({}), {
cwd: getRootWorkspacePath()
}).execute();
execution.processExitSubject.subscribe(() => {
this.logMetric(execution.command.logName, startTime);
});
return execution;
}
}
export async function forceListMetadata(
metadataType: string,
defaultUsernameOrAlias: string,
outputPath: string,
folder?: string
): Promise<string>
|
{
const forceListMetadataExecutor = new ForceListMetadataExecutor(
metadataType,
defaultUsernameOrAlias,
folder
);
const execution = forceListMetadataExecutor.execute();
const cmdOutput = new CommandOutput();
const result = await cmdOutput.getCmdResult(execution);
fs.writeFileSync(outputPath, result);
return result;
}
|
identifier_body
|
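The executor above only assembles and shells out to a Salesforce CLI command. For reference, a hedged Python sketch of the equivalent invocation, using just the flags the builder adds (-m, -u, --json, and optionally --folder); the metadata type and username values are placeholders:

import json
import subprocess

def force_list_metadata(metadata_type, username, folder=None):
    """Run sfdx force:mdapi:listmetadata and return the parsed JSON output."""
    cmd = ['sfdx', 'force:mdapi:listmetadata',
           '-m', metadata_type, '-u', username, '--json']
    if folder:
        cmd += ['--folder', folder]
    return json.loads(subprocess.check_output(cmd))

# e.g. force_list_metadata('Report', 'myOrgAlias', folder='unfiled$public')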
conf.py
|
# -*- coding: utf-8 -*-
#
# M2Crypto documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 20 11:15:12 2017.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'M2Crypto'
copyright = u'2017, Matej Cepl <[email protected]>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
|
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'M2Cryptodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'M2Crypto.tex', u'M2Crypto Documentation',
u'Matej Cepl \\textless{}[email protected]\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'm2crypto', u'M2Crypto Documentation',
[u'Matej Cepl <[email protected]>'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'M2Crypto', u'M2Crypto Documentation',
u'Matej Cepl <[email protected]>', 'M2Crypto', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'M2Crypto'
epub_author = u'Matej Cepl <[email protected]>'
epub_publisher = u'Matej Cepl <[email protected]>'
epub_copyright = u'2017, Matej Cepl <[email protected]>'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
|
random_line_split
|
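With this conf.py in place the documentation is normally built with sphinx-build -b html . _build/html; below is a hedged sketch of driving the same build programmatically, assuming a Sphinx version that ships sphinx.cmd.build:

from sphinx.cmd.build import build_main

# Equivalent to: sphinx-build -b html <sourcedir> <outdir>
exit_code = build_main(['-b', 'html', '.', '_build/html'])
raise SystemExit(exit_code)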
gcs_storage.py
|
import os
import yaml
from google.cloud import storage
from google.oauth2 import service_account
from .storage import Storage
class GcsStorage(Storage):
def __init__(self, bucket, path, project=None, json_path=None):
if bucket is None:
raise ValueError('Bucket must be supplied to GCS storage')
if path is None:
path = 'spinbot/cache'
self.path = path
if json_path is not None:
json_path = os.path.expanduser(json_path)
credentials = service_account.Credentials.from_service_account_file(json_path)
if credentials.requires_scopes:
credentials = credentials.with_scopes(['https://www.googleapis.com/auth/devstorage.read_write'])
self.client = storage.Client(project=project, credentials=credentials)
else:
self.client = storage.Client()
if self.client.lookup_bucket(bucket) is None:
self.client.create_bucket(bucket)
self.bucket = self.client.get_bucket(bucket)
super().__init__()
def store(self, key, val):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
props[key] = val
b.upload_from_string(yaml.safe_dump(props))
def
|
(self, key):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
return props.get(key)
|
load
|
identifier_name
|
gcs_storage.py
|
import os
import yaml
from google.cloud import storage
from google.oauth2 import service_account
from .storage import Storage
class GcsStorage(Storage):
def __init__(self, bucket, path, project=None, json_path=None):
if bucket is None:
raise ValueError('Bucket must be supplied to GCS storage')
if path is None:
path = 'spinbot/cache'
self.path = path
if json_path is not None:
json_path = os.path.expanduser(json_path)
credentials = service_account.Credentials.from_service_account_file(json_path)
if credentials.requires_scopes:
credentials = credentials.with_scopes(['https://www.googleapis.com/auth/devstorage.read_write'])
self.client = storage.Client(project=project, credentials=credentials)
else:
self.client = storage.Client()
|
self.bucket = self.client.get_bucket(bucket)
super().__init__()
def store(self, key, val):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
props[key] = val
b.upload_from_string(yaml.safe_dump(props))
def load(self, key):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
return props.get(key)
|
if self.client.lookup_bucket(bucket) is None:
self.client.create_bucket(bucket)
|
random_line_split
|
gcs_storage.py
|
import os
import yaml
from google.cloud import storage
from google.oauth2 import service_account
from .storage import Storage
class GcsStorage(Storage):
def __init__(self, bucket, path, project=None, json_path=None):
if bucket is None:
raise ValueError('Bucket must be supplied to GCS storage')
if path is None:
path = 'spinbot/cache'
self.path = path
if json_path is not None:
json_path = os.path.expanduser(json_path)
credentials = service_account.Credentials.from_service_account_file(json_path)
if credentials.requires_scopes:
credentials = credentials.with_scopes(['https://www.googleapis.com/auth/devstorage.read_write'])
self.client = storage.Client(project=project, credentials=credentials)
else:
self.client = storage.Client()
if self.client.lookup_bucket(bucket) is None:
self.client.create_bucket(bucket)
self.bucket = self.client.get_bucket(bucket)
super().__init__()
def store(self, key, val):
|
def load(self, key):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
return props.get(key)
|
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
props[key] = val
b.upload_from_string(yaml.safe_dump(props))
|
identifier_body
|
gcs_storage.py
|
import os
import yaml
from google.cloud import storage
from google.oauth2 import service_account
from .storage import Storage
class GcsStorage(Storage):
def __init__(self, bucket, path, project=None, json_path=None):
if bucket is None:
raise ValueError('Bucket must be supplied to GCS storage')
if path is None:
path = 'spinbot/cache'
self.path = path
if json_path is not None:
json_path = os.path.expanduser(json_path)
credentials = service_account.Credentials.from_service_account_file(json_path)
if credentials.requires_scopes:
credentials = credentials.with_scopes(['https://www.googleapis.com/auth/devstorage.read_write'])
self.client = storage.Client(project=project, credentials=credentials)
else:
self.client = storage.Client()
if self.client.lookup_bucket(bucket) is None:
self.client.create_bucket(bucket)
self.bucket = self.client.get_bucket(bucket)
super().__init__()
def store(self, key, val):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
props = {}
props[key] = val
b.upload_from_string(yaml.safe_dump(props))
def load(self, key):
b = self.bucket.get_blob(self.path)
contents = '{}'
if b:
contents = b.download_as_string()
else:
b = self.bucket.blob(self.path)
props = yaml.safe_load(contents)
if props is None:
|
return props.get(key)
|
props = {}
|
conditional_block
|
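The GcsStorage class above keeps a small key/value cache as a single YAML blob in a bucket: every store() downloads the blob, rewrites the whole mapping, and re-uploads it. A minimal usage sketch, assuming the package layout shown; the package path, bucket, project, and key-file names below are hypothetical placeholders, not values from the records:

# Minimal usage sketch for GcsStorage. Package path, bucket, project and
# json_path are hypothetical, not taken from the records above.
from spinbot.gcs_storage import GcsStorage  # hypothetical package path

storage = GcsStorage(
    bucket='my-spinbot-cache',        # hypothetical bucket name
    path='spinbot/cache',             # the class default shown above
    project='my-gcp-project',         # hypothetical project
    json_path='~/keys/bot-sa.json',   # hypothetical service-account key
)
storage.store('last_issue', 1234)
print(storage.load('last_issue'))     # -> 1234

Because the whole mapping is rewritten on every store(), this design suits small, low-write caches; concurrent writers could silently lose updates.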
parallelGAModelP_AVR.py
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def evaluationFunction(individual, modelOmega, mean):
"""
This function calculates the loglikelihood of a model (individual) with
the real data from the prior X years (modelOmega, with length X).
It selects the smallest loglikelihood value.
"""
logValue = float('Infinity')
genomeModel=type(modelOmega[0])
for i in range(len(modelOmega)):
genomeModel.bins=list(individual)
modelLambda=type(modelOmega[0])
modelLambda.bins=calcNumberBins(genomeModel.bins, mean, modelOmega[i].values4poisson)
tempValue=loglikelihood(modelLambda, modelOmega[i])
if tempValue < logValue:
logValue = tempValue
return logValue,
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
"""
The main function. It evolves models, namely modelLambda or individual.
This applies the gaModel with a circular island model.
It uses two parallel systems: (1) a simple one that splits the GA evolution
between cores, and (2) one that distributes the islands.
"""
start = time.clock()
# Attribute generator
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
toolbox.register("attr_float", random.random)
toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxOnePoint)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
#calculating the number of individuals of the populations based on the number of executions
y=int(n_aval/NGEN)
x=n_aval - y*NGEN
n= x + y
pop = toolbox.population(n)
logbook = tools.Logbook()
logbook.header = "min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))  # need to pass two model.bins: one is the real data, the other the generated model
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
#1 to NGEN
#creating comm and island model not fixed
target = 0
info = MPI.Status()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
mpi_info = MPI.Info.Create()
logbook = tools.Logbook()
logbook.header = "rank","gen","min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
|
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last island[rank] best_pop
#Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
offspring[len(offspring)-1]=best_pop
random.shuffle(offspring)
pop[:] = offspring
#migration
if g % (FREQ-1) == 0 and g > 0:
best_inds = tools.selBest(pop, 1)[0]
data = comm.sendrecv(sendobj=best_inds,dest=dest,source=origin)
#rotation
target+=1
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
pop[random.randint(0, len(pop)-1)] = data  # insert the migrant received from origin
del best_pop
del data
#logBook
record = stats.compile(pop)
logbook.record(gen=g, **record)
# choose the best value
if rank == 0:
best_pop=tools.selBest(pop, 1)[0]
best_all_pop = list()
best_all_pop.append(best_pop)
for thread in range(size):
if (thread != 0):
# local_best = comm.recv(source=thread)
local_best = comm.recv(source=thread)
# req = comm.irecv(source=thread)
# local_best = req.wait()
best_all_pop.append(local_best)
maximum = float('-inf')
# for value, index in zip(best_all_pop, range(len(best_all_pop))):
for local_best in best_all_pop:
local_maximum = evaluationFunction(local_best, modelOmega, mean)
if maximum < local_maximum[0]:
# theBestIndex = index
maximum = local_maximum[0]
best_pop = local_best
else:
best_pop=tools.selBest(pop, 1)[0]
comm.send(best_pop, dest=0)
end = time.clock()
generatedModel = type(modelOmega[0])
generatedModel.prob = best_pop
generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
generatedModel.loglikelihood = best_pop.fitness.values
generatedModel.definitions = modelOmega[0].definitions
generatedModel.time = end - start  # elapsed time, not negated
generatedModel.logbook = logbook
return generatedModel
if __name__ == "__main__":
gaModel()
|
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
|
conditional_block
|
parallelGAModelP_AVR.py
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def evaluationFunction(individual, modelOmega, mean):
"""
This function calculates the loglikelihood of a model (individual) with
the real data from the prior X years (modelOmega, with length X).
It selects the smallest loglikelihood value.
"""
logValue = float('Infinity')
genomeModel=type(modelOmega[0])
for i in range(len(modelOmega)):
genomeModel.bins=list(individual)
modelLambda=type(modelOmega[0])
modelLambda.bins=calcNumberBins(genomeModel.bins, mean, modelOmega[i].values4poisson)
tempValue=loglikelihood(modelLambda, modelOmega[i])
if tempValue < logValue:
logValue = tempValue
return logValue,
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
"""
The main function. It evolves models, namely modelLambda or individual.
This applies the gaModel with a circular island model.
It uses two parallel systems: (1) a simple one that splits the GA evolution
between cores, and (2) one that distributes the islands.
"""
start = time.clock()
# Attribute generator
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
toolbox.register("attr_float", random.random)
toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxOnePoint)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
|
#calculating the number of individuals of the populations based on the number of executions
y=int(n_aval/NGEN)
x=n_aval - y*NGEN
n= x + y
pop = toolbox.population(n)
logbook = tools.Logbook()
logbook.header = "min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))  # need to pass two model.bins: one is the real data, the other the generated model
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
#1 to NGEN
#creating comm and island model not fixed
target = 0
info = MPI.Status()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
mpi_info = MPI.Info.Create()
logbook = tools.Logbook()
logbook.header = "rank","gen","min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last island[rank] best_pop
#Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
offspring[len(offspring)-1]=best_pop
random.shuffle(offspring)
pop[:] = offspring
#migration
if g % (FREQ-1) == 0 and g > 0:
best_inds = tools.selBest(pop, 1)[0]
data = comm.sendrecv(sendobj=best_inds,dest=dest,source=origin)
#rotation
target+=1
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
pop[random.randint(0, len(pop)-1)] = data  # insert the migrant received from origin
del best_pop
del data
#logBook
record = stats.compile(pop)
logbook.record(gen=g, **record)
# choose the best value
if rank == 0:
best_pop=tools.selBest(pop, 1)[0]
best_all_pop = list()
best_all_pop.append(best_pop)
for thread in range(size):
if (thread != 0):
# local_best = comm.recv(source=thread)
local_best = comm.recv(source=thread)
# req = comm.irecv(source=thread)
# local_best = req.wait()
best_all_pop.append(local_best)
maximum = float('-inf')
# for value, index in zip(best_all_pop, range(len(best_all_pop))):
for local_best in best_all_pop:
local_maximum = evaluationFunction(local_best, modelOmega, mean)
if maximum < local_maximum[0]:
# theBestIndex = index
maximum = local_maximum[0]
best_pop = local_best
else:
best_pop=tools.selBest(pop, 1)[0]
comm.send(best_pop, dest=0)
end = time.clock()
generatedModel = type(modelOmega[0])
generatedModel.prob = best_pop
generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
generatedModel.loglikelihood = best_pop.fitness.values
generatedModel.definitions = modelOmega[0].definitions
generatedModel.time = end - start  # elapsed time, not negated
generatedModel.logbook = logbook
return generatedModel
if __name__ == "__main__":
gaModel()
|
random_line_split
|
|
parallelGAModelP_AVR.py
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def
|
(individual, modelOmega, mean):
"""
This function calculates the loglikelihood of a model (individual) with
the real data from the prior X years (modelOmega, with length X).
It selects the smallest loglikelihood value.
"""
logValue = float('Infinity')
genomeModel=type(modelOmega[0])
for i in range(len(modelOmega)):
genomeModel.bins=list(individual)
modelLambda=type(modelOmega[0])
modelLambda.bins=calcNumberBins(genomeModel.bins, mean, modelOmega[i].values4poisson)
tempValue=loglikelihood(modelLambda, modelOmega[i])
if tempValue < logValue:
logValue = tempValue
return logValue,
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
"""
The main function. It evolves models, namely modelLambda or individual.
This applies the gaModel with a circular island model.
It uses two parallel systems: (1) a simple one that splits the GA evolution
between cores, and (2) one that distributes the islands.
"""
start = time.clock()
# Attribute generator
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
toolbox.register("attr_float", random.random)
toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxOnePoint)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
#calculating the number of individuals of the populations based on the number of executions
y=int(n_aval/NGEN)
x=n_aval - y*NGEN
n= x + y
pop = toolbox.population(n)
logbook = tools.Logbook()
logbook.header = "min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))  # need to pass two model.bins: one is the real data, the other the generated model
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
#1 to NGEN
#creating comm and island model not fixed
target = 0
info = MPI.Status()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
mpi_info = MPI.Info.Create()
logbook = tools.Logbook()
logbook.header = "rank","gen","min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last island[rank] best_pop
#Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
offspring[len(offspring)-1]=best_pop
random.shuffle(offspring)
pop[:] = offspring
#migration
if g % (FREQ-1) == 0 and g > 0:
best_inds = tools.selBest(pop, 1)[0]
data = comm.sendrecv(sendobj=best_inds,dest=dest,source=origin)
#rotation
target+=1
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
pop[random.randint(0, len(pop)-1)] = data  # insert the migrant received from origin
del best_pop
del data
#logBook
record = stats.compile(pop)
logbook.record(gen=g, **record)
# choose the best value
if rank == 0:
best_pop=tools.selBest(pop, 1)[0]
best_all_pop = list()
best_all_pop.append(best_pop)
for thread in range(size):
if (thread != 0):
# local_best = comm.recv(source=thread)
local_best = comm.recv(source=thread)
# req = comm.irecv(source=thread)
# local_best = req.wait()
best_all_pop.append(local_best)
maximum = float('-inf')
# for value, index in zip(best_all_pop, range(len(best_all_pop))):
for local_best in best_all_pop:
local_maximum = evaluationFunction(local_best, modelOmega, mean)
if maximum < local_maximum[0]:
# theBestIndex = index
maximum = local_maximum[0]
best_pop = local_best
else:
best_pop=tools.selBest(pop, 1)[0]
comm.send(best_pop, dest=0)
end = time.clock()
generatedModel = type(modelOmega[0])
generatedModel.prob = best_pop
generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
generatedModel.loglikelihood = best_pop.fitness.values
generatedModel.definitions = modelOmega[0].definitions
generatedModel.time = end - start  # elapsed time, not negated
generatedModel.logbook = logbook
return generatedModel
if __name__ == "__main__":
gaModel()
|
evaluationFunction
|
identifier_name
|
parallelGAModelP_AVR.py
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def evaluationFunction(individual, modelOmega, mean):
|
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
"""
The main function. It evolves models, namely modelLambda or individual.
This applies the gaModel with a circular island model.
It uses two parallel systems: (1) a simple one that splits the GA evolution
between cores, and (2) one that distributes the islands.
"""
start = time.clock()
# Attribute generator
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
toolbox.register("attr_float", random.random)
toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxOnePoint)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
#calculating the number of individuals of the populations based on the number of executions
y=int(n_aval/NGEN)
x=n_aval - y*NGEN
n= x + y
pop = toolbox.population(n)
logbook = tools.Logbook()
logbook.header = "min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))  # need to pass two model.bins: one is the real data, the other the generated model
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
#1 to NGEN
#creating comm and island model not fixed
target = 0
info = MPI.Status()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
mpi_info = MPI.Info.Create()
logbook = tools.Logbook()
logbook.header = "rank","gen","min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last island[rank] best_pop
#Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
offspring[len(offspring)-1]=best_pop
random.shuffle(offspring)
pop[:] = offspring
#migration
if g % (FREQ-1) == 0 and g > 0:
best_inds = tools.selBest(pop, 1)[0]
data = comm.sendrecv(sendobj=best_inds,dest=dest,source=origin)
#rotation
target+=1
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
pop[random.randint(0, len(pop)-1)] = data  # insert the migrant received from origin
del best_pop
del data
#logBook
record = stats.compile(pop)
logbook.record(gen=g, **record)
# choose the best value
if rank == 0:
best_pop=tools.selBest(pop, 1)[0]
best_all_pop = list()
best_all_pop.append(best_pop)
for thread in range(size):
if (thread != 0):
# local_best = comm.recv(source=thread)
local_best = comm.recv(source=thread)
# req = comm.irecv(source=thread)
# local_best = req.wait()
best_all_pop.append(local_best)
maximum = float('-inf')
# for value, index in zip(best_all_pop, range(len(best_all_pop))):
for local_best in best_all_pop:
local_maximum = evaluationFunction(local_best, modelOmega, mean)
if maximum < local_maximum[0]:
# theBestIndex = index
maximum = local_maximum[0]
best_pop = local_best
else:
best_pop=tools.selBest(pop, 1)[0]
comm.send(best_pop, dest=0)
end = time.clock()
generatedModel = type(modelOmega[0])
generatedModel.prob = best_pop
generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
generatedModel.loglikelihood = best_pop.fitness.values
generatedModel.definitions = modelOmega[0].definitions
generatedModel.time = end - start  # elapsed time, not negated
generatedModel.logbook = logbook
return generatedModel
if __name__ == "__main__":
gaModel()
|
"""
This function calculates the loglikelihood of a model (individual) with
the real data from the prior X years (modelOmega, with length X).
It selects the smallest loglikelihood value.
"""
logValue = float('Infinity')
genomeModel=type(modelOmega[0])
for i in range(len(modelOmega)):
genomeModel.bins=list(individual)
modelLambda=type(modelOmega[0])
modelLambda.bins=calcNumberBins(genomeModel.bins, mean, modelOmega[i].values4poisson)
tempValue=loglikelihood(modelLambda, modelOmega[i])
if tempValue < logValue:
logValue = tempValue
return logValue,
|
identifier_body
|
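In the gaModel records above, migration uses a ring whose offset grows with target, so each exchange pairs a rank with a progressively more distant neighbour. The neighbour arithmetic can be checked in isolation, without MPI; note that the extra `+ size` in the original dest expression is redundant under the modulus:

# Standalone sketch of the circular-island neighbour computation used in
# gaModel; `size` plays the role of comm.Get_size() and no MPI is required.
def ring_neighbours(rank, size, target):
    origin = (rank - (target + 1)) % size
    dest = (rank + (target + 1)) % size  # equals (rank + (target + 1) + size) % size
    return origin, dest

# With 4 islands, each migration round shifts the ring one step further:
for target in range(3):
    print(target, [ring_neighbours(rank, 4, target) for rank in range(4)])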
simple_tomo_test.py
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomo_recon
:platform: Unix
:synopsis: runner for tests using the MPI framework
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
import tempfile
from savu.test import test_utils as tu
from savu.test.plugin_runner_test import run_protected_plugin_runner
class SimpleTomoTest(unittest.TestCase):
def test_process(self):
options = {
"transport": "hdf5",
"process_names": "CPU0",
"data_file": tu.get_test_data_path('24737.nxs'),
|
run_protected_plugin_runner(options)
if __name__ == "__main__":
unittest.main()
|
"process_file": tu.get_test_data_path('simple_recon_test_process.nxs'),
"out_path": tempfile.mkdtemp()
}
|
random_line_split
|
simple_tomo_test.py
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomo_recon
:platform: Unix
:synopsis: runner for tests using the MPI framework
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
import tempfile
from savu.test import test_utils as tu
from savu.test.plugin_runner_test import run_protected_plugin_runner
class SimpleTomoTest(unittest.TestCase):
def test_process(self):
options = {
"transport": "hdf5",
"process_names": "CPU0",
"data_file": tu.get_test_data_path('24737.nxs'),
"process_file": tu.get_test_data_path('simple_recon_test_process.nxs'),
"out_path": tempfile.mkdtemp()
}
run_protected_plugin_runner(options)
if __name__ == "__main__":
|
unittest.main()
|
conditional_block
|
|
simple_tomo_test.py
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomo_recon
:platform: Unix
:synopsis: runner for tests using the MPI framework
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
import tempfile
from savu.test import test_utils as tu
from savu.test.plugin_runner_test import run_protected_plugin_runner
class SimpleTomoTest(unittest.TestCase):
def test_process(self):
|
if __name__ == "__main__":
unittest.main()
|
options = {
"transport": "hdf5",
"process_names": "CPU0",
"data_file": tu.get_test_data_path('24737.nxs'),
"process_file": tu.get_test_data_path('simple_recon_test_process.nxs'),
"out_path": tempfile.mkdtemp()
}
run_protected_plugin_runner(options)
|
identifier_body
|
simple_tomo_test.py
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomo_recon
:platform: Unix
:synopsis: runner for tests using the MPI framework
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
import tempfile
from savu.test import test_utils as tu
from savu.test.plugin_runner_test import run_protected_plugin_runner
class
|
(unittest.TestCase):
def test_process(self):
options = {
"transport": "hdf5",
"process_names": "CPU0",
"data_file": tu.get_test_data_path('24737.nxs'),
"process_file": tu.get_test_data_path('simple_recon_test_process.nxs'),
"out_path": tempfile.mkdtemp()
}
run_protected_plugin_runner(options)
if __name__ == "__main__":
unittest.main()
|
SimpleTomoTest
|
identifier_name
|
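The test above is a thin wrapper: it assembles an options dict and hands it to the protected plugin runner. A hedged helper sketch for building such dicts, using only the keys and savu utilities shown in the records; the helper name and its argument names are invented for this sketch:

import tempfile
from savu.test import test_utils as tu

def make_options(data_name, process_name, transport='hdf5', process_names='CPU0'):
    # Builds the same options dict as SimpleTomoTest.test_process.
    return {
        "transport": transport,
        "process_names": process_names,
        "data_file": tu.get_test_data_path(data_name),
        "process_file": tu.get_test_data_path(process_name),
        "out_path": tempfile.mkdtemp(),
    }

# e.g. run_protected_plugin_runner(make_options('24737.nxs', 'simple_recon_test_process.nxs'))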
block_status.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block status description module
use verification::queue::Status as QueueStatus;
/// General block status
#[derive(Debug, Eq, PartialEq)]
#[cfg_attr(feature = "ipc", binary)]
pub enum
|
{
/// Part of the blockchain.
InChain,
/// Queued for import.
Queued,
/// Known as bad.
Bad,
/// Unknown.
Unknown,
}
impl From<QueueStatus> for BlockStatus {
fn from(status: QueueStatus) -> Self {
match status {
QueueStatus::Queued => BlockStatus::Queued,
QueueStatus::Bad => BlockStatus::Bad,
QueueStatus::Unknown => BlockStatus::Unknown,
}
}
}
|
BlockStatus
|
identifier_name
|
block_status.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block status description module
use verification::queue::Status as QueueStatus;
/// General block status
#[derive(Debug, Eq, PartialEq)]
#[cfg_attr(feature = "ipc", binary)]
pub enum BlockStatus {
/// Part of the blockchain.
InChain,
/// Queued for import.
Queued,
/// Known as bad.
Bad,
/// Unknown.
Unknown,
}
impl From<QueueStatus> for BlockStatus {
fn from(status: QueueStatus) -> Self {
match status {
QueueStatus::Queued => BlockStatus::Queued,
QueueStatus::Bad => BlockStatus::Bad,
QueueStatus::Unknown => BlockStatus::Unknown,
}
}
}
|
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
|
random_line_split
|
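block_status.rs defines a total conversion from the verification queue's status to the general block status. Written out as a plain mapping (a Python illustration only; the real code is the Rust From impl above), the one asymmetry becomes visible:

# QueueStatus -> BlockStatus, as in the From impl in block_status.rs.
QUEUE_TO_BLOCK = {
    "Queued": "Queued",
    "Bad": "Bad",
    "Unknown": "Unknown",
}
# "InChain" never appears on the right-hand side: a block that is already
# part of the blockchain has no corresponding queue status.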
waveBlock.ts
|
import { NodeMaterialBlock } from '../nodeMaterialBlock';
import { NodeMaterialBlockConnectionPointTypes } from '../Enums/nodeMaterialBlockConnectionPointTypes';
import { NodeMaterialBuildState } from '../nodeMaterialBuildState';
import { NodeMaterialConnectionPoint } from '../nodeMaterialBlockConnectionPoint';
import { NodeMaterialBlockTargets } from '../Enums/nodeMaterialBlockTargets';
import { RegisterClass } from '../../../Misc/typeStore';
import { Scene } from '../../../scene';
/**
* Operations supported by the Wave block
*/
export enum WaveBlockKind {
/** SawTooth */
SawTooth,
/** Square */
Square,
/** Triangle */
Triangle
}
/**
* Block used to apply wave operations to floats
*/
export class WaveBlock extends NodeMaterialBlock {
/**
* Gets or sets the kind of wave to be applied by the block
*/
public kind = WaveBlockKind.SawTooth;
/**
* Creates a new WaveBlock
* @param name defines the block name
*/
public constructor(name: string) {
super(name, NodeMaterialBlockTargets.Neutral);
this.registerInput("input", NodeMaterialBlockConnectionPointTypes.AutoDetect);
this.registerOutput("output", NodeMaterialBlockConnectionPointTypes.BasedOnInput);
this._outputs[0]._typeConnectionSource = this._inputs[0];
this._inputs[0].excludedConnectionPointTypes.push(NodeMaterialBlockConnectionPointTypes.Matrix);
}
/**
* Gets the current class name
* @returns the class name
*/
public getClassName() {
return "WaveBlock";
}
/**
* Gets the input component
*/
public get
|
(): NodeMaterialConnectionPoint {
return this._inputs[0];
}
/**
* Gets the output component
*/
public get output(): NodeMaterialConnectionPoint {
return this._outputs[0];
}
protected _buildBlock(state: NodeMaterialBuildState) {
super._buildBlock(state);
let output = this._outputs[0];
switch (this.kind) {
case WaveBlockKind.SawTooth: {
state.compilationString += this._declareOutput(output, state) + ` = ${this.input.associatedVariableName} - floor(0.5 + ${this.input.associatedVariableName});\r\n`;
break;
}
case WaveBlockKind.Square: {
state.compilationString += this._declareOutput(output, state) + ` = 1.0 - 2.0 * round(fract(${this.input.associatedVariableName}));\r\n`;
break;
}
case WaveBlockKind.Triangle: {
state.compilationString += this._declareOutput(output, state) + ` = 2.0 * abs(2.0 * (${this.input.associatedVariableName} - floor(0.5 + ${this.input.associatedVariableName}))) - 1.0;\r\n`;
break;
}
}
return this;
}
public serialize(): any {
let serializationObject = super.serialize();
serializationObject.kind = this.kind;
return serializationObject;
}
public _deserialize(serializationObject: any, scene: Scene, rootUrl: string) {
super._deserialize(serializationObject, scene, rootUrl);
this.kind = serializationObject.kind;
}
}
RegisterClass("BABYLON.WaveBlock", WaveBlock);
|
input
|
identifier_name
|
waveBlock.ts
|
import { NodeMaterialBlock } from '../nodeMaterialBlock';
import { NodeMaterialBlockConnectionPointTypes } from '../Enums/nodeMaterialBlockConnectionPointTypes';
import { NodeMaterialBuildState } from '../nodeMaterialBuildState';
import { NodeMaterialConnectionPoint } from '../nodeMaterialBlockConnectionPoint';
import { NodeMaterialBlockTargets } from '../Enums/nodeMaterialBlockTargets';
import { RegisterClass } from '../../../Misc/typeStore';
import { Scene } from '../../../scene';
/**
* Operations supported by the Wave block
*/
export enum WaveBlockKind {
/** SawTooth */
SawTooth,
/** Square */
Square,
/** Triangle */
Triangle
}
/**
* Block used to apply wave operations to floats
*/
export class WaveBlock extends NodeMaterialBlock {
/**
* Gets or sets the kind of wave to be applied by the block
*/
public kind = WaveBlockKind.SawTooth;
/**
* Creates a new WaveBlock
* @param name defines the block name
*/
public constructor(name: string) {
super(name, NodeMaterialBlockTargets.Neutral);
this.registerInput("input", NodeMaterialBlockConnectionPointTypes.AutoDetect);
this.registerOutput("output", NodeMaterialBlockConnectionPointTypes.BasedOnInput);
this._outputs[0]._typeConnectionSource = this._inputs[0];
this._inputs[0].excludedConnectionPointTypes.push(NodeMaterialBlockConnectionPointTypes.Matrix);
}
/**
* Gets the current class name
* @returns the class name
*/
public getClassName() {
return "WaveBlock";
}
/**
* Gets the input component
*/
public get input(): NodeMaterialConnectionPoint {
return this._inputs[0];
}
/**
* Gets the output component
*/
public get output(): NodeMaterialConnectionPoint
|
protected _buildBlock(state: NodeMaterialBuildState) {
super._buildBlock(state);
let output = this._outputs[0];
switch (this.kind) {
case WaveBlockKind.SawTooth: {
state.compilationString += this._declareOutput(output, state) + ` = ${this.input.associatedVariableName} - floor(0.5 + ${this.input.associatedVariableName});\r\n`;
break;
}
case WaveBlockKind.Square: {
state.compilationString += this._declareOutput(output, state) + ` = 1.0 - 2.0 * round(fract(${this.input.associatedVariableName}));\r\n`;
break;
}
case WaveBlockKind.Triangle: {
state.compilationString += this._declareOutput(output, state) + ` = 2.0 * abs(2.0 * (${this.input.associatedVariableName} - floor(0.5 + ${this.input.associatedVariableName}))) - 1.0;\r\n`;
break;
}
}
return this;
}
public serialize(): any {
let serializationObject = super.serialize();
serializationObject.kind = this.kind;
return serializationObject;
}
public _deserialize(serializationObject: any, scene: Scene, rootUrl: string) {
super._deserialize(serializationObject, scene, rootUrl);
this.kind = serializationObject.kind;
}
}
RegisterClass("BABYLON.WaveBlock", WaveBlock);
|
{
return this._outputs[0];
}
|
identifier_body
|
waveBlock.ts
|
import { NodeMaterialBlock } from '../nodeMaterialBlock';
import { NodeMaterialBlockConnectionPointTypes } from '../Enums/nodeMaterialBlockConnectionPointTypes';
import { NodeMaterialBuildState } from '../nodeMaterialBuildState';
import { NodeMaterialConnectionPoint } from '../nodeMaterialBlockConnectionPoint';
import { NodeMaterialBlockTargets } from '../Enums/nodeMaterialBlockTargets';
import { RegisterClass } from '../../../Misc/typeStore';
import { Scene } from '../../../scene';
/**
* Operations supported by the Wave block
*/
export enum WaveBlockKind {
/** SawTooth */
SawTooth,
/** Square */
Square,
/** Triangle */
Triangle
|
*/
export class WaveBlock extends NodeMaterialBlock {
/**
* Gets or sets the kind of wave to be applied by the block
*/
public kind = WaveBlockKind.SawTooth;
/**
* Creates a new WaveBlock
* @param name defines the block name
*/
public constructor(name: string) {
super(name, NodeMaterialBlockTargets.Neutral);
this.registerInput("input", NodeMaterialBlockConnectionPointTypes.AutoDetect);
this.registerOutput("output", NodeMaterialBlockConnectionPointTypes.BasedOnInput);
this._outputs[0]._typeConnectionSource = this._inputs[0];
this._inputs[0].excludedConnectionPointTypes.push(NodeMaterialBlockConnectionPointTypes.Matrix);
}
/**
* Gets the current class name
* @returns the class name
*/
public getClassName() {
return "WaveBlock";
}
/**
* Gets the input component
*/
public get input(): NodeMaterialConnectionPoint {
return this._inputs[0];
}
/**
* Gets the output component
*/
public get output(): NodeMaterialConnectionPoint {
return this._outputs[0];
}
protected _buildBlock(state: NodeMaterialBuildState) {
super._buildBlock(state);
let output = this._outputs[0];
switch (this.kind) {
case WaveBlockKind.SawTooth: {
state.compilationString += this._declareOutput(output, state) + ` = ${this.input.associatedVariableName} - floor(0.5 + ${this.input.associatedVariableName});\r\n`;
break;
}
case WaveBlockKind.Square: {
state.compilationString += this._declareOutput(output, state) + ` = 1.0 - 2.0 * round(fract(${this.input.associatedVariableName}));\r\n`;
break;
}
case WaveBlockKind.Triangle: {
state.compilationString += this._declareOutput(output, state) + ` = 2.0 * abs(2.0 * (${this.input.associatedVariableName} - floor(0.5 + ${this.input.associatedVariableName}))) - 1.0;\r\n`;
break;
}
}
return this;
}
public serialize(): any {
let serializationObject = super.serialize();
serializationObject.kind = this.kind;
return serializationObject;
}
public _deserialize(serializationObject: any, scene: Scene, rootUrl: string) {
super._deserialize(serializationObject, scene, rootUrl);
this.kind = serializationObject.kind;
}
}
RegisterClass("BABYLON.WaveBlock", WaveBlock);
|
}
/**
* Block used to apply wave operations to floats
|
random_line_split
|
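The three branches of _buildBlock above emit GLSL for sawtooth, square, and triangle waves. The same formulas in plain Python, useful for checking their ranges; note that Python's banker's rounding can differ from GLSL round() exactly at .5, so treat the square wave as a sketch:

import math

def sawtooth(x):
    # x - floor(0.5 + x), range [-0.5, 0.5)
    return x - math.floor(0.5 + x)

def square(x):
    # 1 - 2 * round(fract(x)), values in {-1.0, 1.0}
    return 1.0 - 2.0 * round(x - math.floor(x))

def triangle(x):
    # 2 * |2 * (x - floor(0.5 + x))| - 1, range [-1.0, 1.0]
    return 2.0 * abs(2.0 * (x - math.floor(0.5 + x))) - 1.0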
Site.ts
|
/// <reference path="jquery.d.ts" />
/// <reference path="knockout.d.ts" />
class ServerDevice {
Id: Number;
Name: string;
Description: string;
DisplayOrder: Number;
IsReadOnly: boolean;
}
class Device {
Id: KnockoutObservable<Number> = ko.observable(0);
Name: KnockoutObservable<string> = ko.observable("");
Description: KnockoutObservable<string> = ko.observable("");
DisplayOrder: KnockoutObservable<Number> = ko.observable(0);
IsReadOnly: KnockoutObservable<boolean> = ko.observable(false);
constructor(sdevice: ServerDevice = undefined) {
|
public AsServerDevice(): ServerDevice {
var sd = new ServerDevice();
sd.Id = this.Id();
sd.Name = this.Name();
sd.Description = this.Description();
sd.DisplayOrder = this.DisplayOrder();
sd.IsReadOnly = this.IsReadOnly();
return sd;
}
}
class DevicesViewModel {
private _apiUrl: string;
public Devices: KnockoutObservableArray<Device> = ko.observableArray([]);
public CurrentDevice: KnockoutObservable<Device> = ko.observable(undefined);
constructor() {
this._apiUrl = "/api/Devices";
}
private _devicesReceived(data: ServerDevice[]) {
for (var i = 0; i < data.length; i++) {
this.Devices.push(new Device(data[i]));
}
}
public loadDevices() {
var me = this;
$.ajax({
url: me._apiUrl,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (data) {
me._devicesReceived(data);
}
});
}
public loadDevice(id: Number) {
var me = this;
$.ajax({
url: me._apiUrl + "/" + id,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (device: ServerDevice) {
var found = false;
for (var i = 0; i < me.Devices().length; i++) {
var d = me.Devices()[i];
if (d.Id() == device.Id) {
me.Devices.replace(d, new Device(device));
found = true;
break;
}
}
if (!found) {
me.Devices.push(new Device(device));
}
}
});
}
public AddNewDevice() {
this.CurrentDevice(new Device());
}
private validateDevice(dev: Device): boolean {
var cd = this.CurrentDevice();
var errorMessage: string = "";
if (cd.Name().length <= 0) {
errorMessage = "Name is required.\n";
}
if (cd.Description().length <= 0) {
errorMessage += "Description is required.\n";
}
if (errorMessage.length > 0) {
alert(errorMessage);
}
return errorMessage.length == 0;
}
public SaveNewDevice() {
var cd = this.CurrentDevice();
if (this.validateDevice(cd)) {
var me = this;
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: 'post',
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function (id) {
me.loadDevice(id);
me.CurrentDevice(undefined);
}
});
}
}
public EditDevice(dev: Device) {
this.CurrentDevice(dev);
}
public UpdateDevice() {
var cd = this.CurrentDevice();
var me = this;
if (this.validateDevice(cd)) {
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: "PUT",
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function () {
me.loadDevice(cd.Id());
}
});
}
}
}
|
if (sdevice != undefined) {
this.Id(sdevice.Id);
this.Name(sdevice.Name);
this.Description(sdevice.Description);
this.DisplayOrder(sdevice.DisplayOrder);
this.IsReadOnly(sdevice.IsReadOnly);
}
}
|
identifier_body
|
Site.ts
|
/// <reference path="jquery.d.ts" />
/// <reference path="knockout.d.ts" />
class ServerDevice {
Id: Number;
Name: string;
Description: string;
DisplayOrder: Number;
IsReadOnly: boolean;
}
class Device {
Id: KnockoutObservable<Number> = ko.observable(0);
Name: KnockoutObservable<string> = ko.observable("");
Description: KnockoutObservable<string> = ko.observable("");
DisplayOrder: KnockoutObservable<Number> = ko.observable(0);
IsReadOnly: KnockoutObservable<boolean> = ko.observable(false);
constructor(sdevice: ServerDevice = undefined) {
if (sdevice != undefined) {
this.Id(sdevice.Id);
this.Name(sdevice.Name);
this.Description(sdevice.Description);
this.DisplayOrder(sdevice.DisplayOrder);
this.IsReadOnly(sdevice.IsReadOnly);
}
}
public AsServerDevice(): ServerDevice {
var sd = new ServerDevice();
sd.Id = this.Id();
sd.Name = this.Name();
sd.Description = this.Description();
sd.DisplayOrder = this.DisplayOrder();
sd.IsReadOnly = this.IsReadOnly();
return sd;
}
}
class DevicesViewModel {
private _apiUrl: string;
public Devices: KnockoutObservableArray<Device> = ko.observableArray([]);
public CurrentDevice: KnockoutObservable<Device> = ko.observable(undefined);
constructor() {
this._apiUrl = "/api/Devices";
}
private _devicesReceived(data: ServerDevice[]) {
for (var i = 0; i < data.length; i++) {
this.Devices.push(new Device(data[i]));
}
}
public loadDevices() {
var me = this;
$.ajax({
url: me._apiUrl,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (data) {
me._devicesReceived(data);
}
});
}
public loadDevice(id: Number) {
var me = this;
$.ajax({
url: me._apiUrl + "/" + id,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (device: ServerDevice) {
var found = false;
for (var i = 0; i < me.Devices().length; i++) {
var d = me.Devices()[i];
if (d.Id() == device.Id) {
me.Devices.replace(d, new Device(device));
found = true;
break;
}
}
if (!found) {
|
}
});
}
public AddNewDevice() {
this.CurrentDevice(new Device());
}
private validateDevice(dev: Device): boolean {
var cd = this.CurrentDevice();
var errorMessage: string = "";
if (cd.Name().length <= 0) {
errorMessage = "Name is required.\n";
}
if (cd.Description().length <= 0) {
errorMessage += "Description is required.\n";
}
if (errorMessage.length > 0) {
alert(errorMessage);
}
return errorMessage.length == 0;
}
public SaveNewDevice() {
var cd = this.CurrentDevice();
if (this.validateDevice(cd)) {
var me = this;
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: 'post',
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function (id) {
me.loadDevice(id);
me.CurrentDevice(undefined);
}
});
}
}
public EditDevice(dev: Device) {
this.CurrentDevice(dev);
}
public UpdateDevice() {
var cd = this.CurrentDevice();
var me = this;
if (this.validateDevice(cd)) {
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: "PUT",
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function () {
me.loadDevice(cd.Id());
}
});
}
}
}
|
me.Devices.push(new Device(device));
}
|
conditional_block
|
Site.ts
|
/// <reference path="jquery.d.ts" />
/// <reference path="knockout.d.ts" />
class ServerDevice {
Id: Number;
Name: string;
Description: string;
DisplayOrder: Number;
IsReadOnly: boolean;
}
class Device {
Id: KnockoutObservable<Number> = ko.observable(0);
Name: KnockoutObservable<string> = ko.observable("");
Description: KnockoutObservable<string> = ko.observable("");
DisplayOrder: KnockoutObservable<Number> = ko.observable(0);
IsReadOnly: KnockoutObservable<boolean> = ko.observable(false);
constructor(sdevice: ServerDevice = undefined) {
if (sdevice != undefined) {
this.Id(sdevice.Id);
this.Name(sdevice.Name);
this.Description(sdevice.Description);
this.DisplayOrder(sdevice.DisplayOrder);
this.IsReadOnly(sdevice.IsReadOnly);
}
}
public AsServerDevice(): ServerDevice {
var sd = new ServerDevice();
sd.Id = this.Id();
sd.Name = this.Name();
sd.Description = this.Description();
sd.DisplayOrder = this.DisplayOrder();
sd.IsReadOnly = this.IsReadOnly();
return sd;
}
}
class DevicesViewModel {
private _apiUrl: string;
public Devices: KnockoutObservableArray<Device> = ko.observableArray([]);
public CurrentDevice: KnockoutObservable<Device> = ko.observable(undefined);
constructor() {
this._apiUrl = "/api/Devices";
}
private _devicesReceived(data: ServerDevice[]) {
for (var i = 0; i < data.length; i++) {
this.Devices.push(new Device(data[i]));
}
}
public loadDevices() {
var me = this;
$.ajax({
url: me._apiUrl,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (data) {
me._devicesReceived(data);
}
});
}
public loadDevice(id: Number) {
var me = this;
$.ajax({
url: me._apiUrl + "/" + id,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (device: ServerDevice) {
var found = false;
for (var i = 0; i < me.Devices().length; i++) {
var d = me.Devices()[i];
if (d.Id() == device.Id) {
me.Devices.replace(d, new Device(device));
found = true;
break;
}
}
if (!found) {
me.Devices.push(new Device(device));
}
}
});
}
public AddNewDevice() {
this.CurrentDevice(new Device());
}
private validateDevice(dev: Device): boolean {
var cd = this.CurrentDevice();
var errorMessage: string = "";
if (cd.Name().length <= 0) {
errorMessage = "Name is required.\n";
}
if (cd.Description().length <= 0) {
errorMessage += "Description is required.\n";
}
if (errorMessage.length > 0) {
alert(errorMessage);
}
return errorMessage.length == 0;
}
public SaveNewDevice() {
var cd = this.CurrentDevice();
if (this.validateDevice(cd)) {
var me = this;
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
|
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function (id) {
me.loadDevice(id);
me.CurrentDevice(undefined);
}
});
}
}
public EditDevice(dev: Device) {
this.CurrentDevice(dev);
}
public UpdateDevice() {
var cd = this.CurrentDevice();
var me = this;
if (this.validateDevice(cd)) {
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: "PUT",
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function () {
me.loadDevice(cd.Id());
}
});
}
}
}
|
type: 'post',
contentType: "application/json; charset=utf-8",
|
random_line_split
|
Site.ts
|
/// <reference path="jquery.d.ts" />
/// <reference path="knockout.d.ts" />
class ServerDevice {
Id: Number;
Name: string;
Description: string;
DisplayOrder: Number;
IsReadOnly: boolean;
}
class Device {
Id: KnockoutObservable<Number> = ko.observable(0);
Name: KnockoutObservable<string> = ko.observable("");
Description: KnockoutObservable<string> = ko.observable("");
DisplayOrder: KnockoutObservable<Number> = ko.observable(0);
IsReadOnly: KnockoutObservable<boolean> = ko.observable(false);
constructor(sdevice: ServerDevice = undefined) {
if (sdevice != undefined) {
this.Id(sdevice.Id);
this.Name(sdevice.Name);
this.Description(sdevice.Description);
this.DisplayOrder(sdevice.DisplayOrder);
this.IsReadOnly(sdevice.IsReadOnly);
}
}
public AsServerDevice(): ServerDevice {
var sd = new ServerDevice();
sd.Id = this.Id();
sd.Name = this.Name();
sd.Description = this.Description();
sd.DisplayOrder = this.DisplayOrder();
sd.IsReadOnly = this.IsReadOnly();
return sd;
}
}
class DevicesViewModel {
private _apiUrl: string;
public Devices: KnockoutObservableArray<Device> = ko.observableArray([]);
public CurrentDevice: KnockoutObservable<Device> = ko.observable(undefined);
co
|
{
this._apiUrl = "/api/Devices";
}
private _devicesReceived(data: ServerDevice[]) {
for (var i = 0; i < data.length; i++) {
this.Devices.push(new Device(data[i]));
}
}
public loadDevices() {
var me = this;
$.ajax({
url: me._apiUrl,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (data) {
me._devicesReceived(data);
}
});
}
public loadDevice(id: Number) {
var me = this;
$.ajax({
url: me._apiUrl + "/" + id,
type: 'get',
contentType: "application/json; charset=utf-8",
success: function (device: ServerDevice) {
var found = false;
for (var i = 0; i < me.Devices().length; i++) {
var d = me.Devices()[i];
if (d.Id() == device.Id) {
me.Devices.replace(d, new Device(device));
found = true;
break;
}
}
if (!found) {
me.Devices.push(new Device(device));
}
}
});
}
public AddNewDevice() {
this.CurrentDevice(new Device());
}
private validateDevice(dev: Device): boolean {
var cd = this.CurrentDevice();
var errorMessage: string = "";
if (cd.Name().length <= 0) {
errorMessage = "Name is required.\n";
}
if (cd.Description().length <= 0) {
errorMessage += "Description is required.\n";
}
if (errorMessage.length > 0) {
alert(errorMessage);
}
return errorMessage.length == 0;
}
public SaveNewDevice() {
var cd = this.CurrentDevice();
if (this.validateDevice(cd)) {
var me = this;
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: 'post',
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function (id) {
me.loadDevice(id);
me.CurrentDevice(undefined);
}
});
}
}
public EditDevice(dev: Device) {
this.CurrentDevice(dev);
}
public UpdateDevice() {
var cd = this.CurrentDevice();
var me = this;
if (this.validateDevice(cd)) {
me.CurrentDevice(undefined);
$.ajax({
url: me._apiUrl,
type: "PUT",
contentType: "application/json; charset=utf-8",
data: JSON.stringify(cd.AsServerDevice()),
dataType: "json",
success: function () {
me.loadDevice(cd.Id());
}
});
}
}
}
|
nstructor()
|
identifier_name
|
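Site.ts drives a REST endpoint at /api/Devices: GET lists or fetches devices, POST creates one and returns its id, PUT updates one. A minimal Python sketch of the same round-trips using requests; the host below is a hypothetical placeholder:

import requests

BASE = "http://localhost:5000/api/Devices"  # hypothetical host for the endpoint above

devices = requests.get(BASE).json()               # mirrors loadDevices()
new_id = requests.post(BASE, json={               # mirrors SaveNewDevice()
    "Id": 0, "Name": "Thermostat", "Description": "Hallway sensor",
    "DisplayOrder": 1, "IsReadOnly": False,
}).json()
device = requests.get(f"{BASE}/{new_id}").json()  # mirrors loadDevice(id)
device["Description"] = "Hallway sensor (moved)"
requests.put(BASE, json=device)                   # mirrors UpdateDevice()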
config.rs
|
use std::collections::{HashMap, HashSet, BTreeMap};
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use crate::types::*;
use serde_json;
use pombase_rc_string::RcString;
// configuration for extension display names and for the "Target of" section
#[derive(Deserialize, Clone, Debug)]
pub struct ExtensionDisplayNames {
pub rel_name: RcString, // name of extension relation
pub display_name: RcString, // text to display
pub if_descendant_of: Option<RcString>, // None if applies to any extension
pub reciprocal_display: Option<RcString>, // None if reciprocal shouldn't be displayed
}
// "interesting parents" are those stored in the JSON in the TermShort structs
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct InterestingParent {
pub termid: RcString,
pub rel_name: RcString,
}
// the order of relations within an extension:
#[derive(Deserialize, Clone, Debug)]
pub struct RelationOrder {
// put the relations in this order in the displayed extensions:
pub relation_order: Vec<RcString>,
// except for these reactions which should always come last:
pub always_last: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct AncestorFilterCategory {
pub display_name: RcString,
// this category matches these terms and their descendants
pub ancestors: Vec<TermId>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct FilterConfig {
pub filter_name: String,
pub display_name: String,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub term_categories: Vec<AncestorFilterCategory>,
#[serde(skip_serializing_if="Option::is_none", default)]
pub slim_name: Option<RcString>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub extension_categories: Vec<AncestorFilterCategory>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SplitByParentsConfig {
pub termids: Vec<RcString>,
pub display_name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ChromosomeConfig {
pub name: RcString,
// string to use for this chromosome in a file name, eg. "chromosome_II"
// or "mitochondrial_chromosome"
pub export_file_id: RcString,
// string to use within files, eg. "II" or "mitochondrial"
pub export_id: RcString,
// eg. "Chromosome II" or "Mitochondrial chromosome"
pub long_display_name: RcString,
// eg. "II" or "Mitochondrial"
pub short_display_name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct CvSourceConfig {
// a type name for the cvtermprop to display to the user
pub display_name_prop: Option<RcString>,
// the cvtermprop type name for the ID used for linking
// or "ACCESSION" if the accession ID of the term should be used
pub id_source: Option<RcString>,
}
pub type TargetRelationName = String;
#[derive(Deserialize, Clone, Debug)]
pub struct TargetOfConfig {
// these priorities are used to order the list in the "Target of" section
// and to filter the "Target of" summary
// https://github.com/pombase/website/issues/299
pub relation_priority: HashMap<TargetRelationName, u32>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct CvConfig {
pub feature_type: RcString,
pub display_name: Option<RcString>,
// filtering configured per CV
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub filters: Vec<FilterConfig>,
// config for splitting cv annotation tables into sub-sections
// based on ancestry
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub split_by_parents: Vec<SplitByParentsConfig>,
// relations to not show in the summary
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub summary_relations_to_hide: Vec<RcString>,
// relations where the range is a gene ID to display like:
// has substrate pom1, cdc1 involved in negative regulation of ...
// rather than as two lines
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub summary_relation_ranges_to_collect: Vec<RcString>,
#[serde(default="SingleOrMultiLocusConfig::not_applicable")]
pub single_or_multi_locus: SingleOrMultiLocusConfig,
// the field to sort by
#[serde(skip_serializing_if="Option::is_none")]
pub sort_details_by: Option<Vec<RcString>>,
// This is the configuration for the "Source" column, a map from
// source name to config
// See Disease association for an example. If there is no config,
// no Source column will be displayed
#[serde(skip_serializing_if="HashMap::is_empty", default)]
pub source_config: HashMap<RcString, CvSourceConfig>,
}
pub type ShortEvidenceCode = RcString;
pub type LongEvidenceCode = RcString;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct ConfigOrganism {
pub taxonid: OrganismTaxonId,
pub genus: RcString,
pub species: RcString,
pub alternative_names: Vec<RcString>,
pub assembly_version: Option<RcString>,
}
impl ConfigOrganism {
pub fn full_name(&self) -> String {
self.genus.clone() + "_" + self.species.as_str()
}
pub fn scientific_name(&self) -> String {
self.genus.clone() + " " + self.species.as_str()
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct ViabilityTerms {
pub viable: RcString,
pub inviable: RcString,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TermAndName {
pub termid: RcString,
pub name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ReferencePageConfig {
pub triage_status_to_ignore: Vec<String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct InterPro {
pub dbnames_to_filter: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ServerSubsetConfig {
pub prefixes_to_remove: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ServerConfig {
pub subsets: ServerSubsetConfig,
pub solr_url: String,
pub close_synonym_boost: f32,
pub distant_synonym_boost: f32,
pub term_definition_boost: f32,
pub django_url: String,
pub cv_name_for_terms_search: String,
pub gene_uniquename_re: String,
}
#[derive(Deserialize, Clone, Debug)]
pub struct EvidenceDetails {
pub long: LongEvidenceCode,
pub link: Option<RcString>,
}
pub type DatabaseName = RcString;
pub type DatabaseAliases = HashMap<DatabaseName, DatabaseName>;
#[derive(Deserialize, Clone, Debug)]
pub struct MacromolecularComplexesConfig {
pub parent_complex_termid: RcString,
pub excluded_terms: HashSet<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct RNAcentralConfig {
// SO termids of RNA features to export
pub export_so_ids: HashSet<RcString>,
}
#[derive(Deserialize, Clone, Debug, PartialEq)]
pub enum SingleOrMultiLocusConfig {
#[serde(rename = "single")]
Single,
#[serde(rename = "multi")]
Multi,
#[serde(rename = "na")]
NotApplicable
}
impl SingleOrMultiLocusConfig {
pub fn not_applicable() -> SingleOrMultiLocusConfig {
SingleOrMultiLocusConfig::NotApplicable
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct ExportColumnConfig {
pub name: RcString,
pub display_name: RcString
}
#[derive(Deserialize, Clone, Debug)]
pub struct AnnotationSubsetConfig {
pub term_ids: Vec<TermId>,
pub file_name: RcString,
pub columns: Vec<ExportColumnConfig>,
#[serde(default="SingleOrMultiLocusConfig::not_applicable")]
pub single_or_multi_locus: SingleOrMultiLocusConfig,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GpadGpiConfig {
// the term IDs of the three GO aspects
pub go_aspect_terms: HashMap<String, TermId>,
// Map a relation term name to a term ID, unless the term ID is None in
// which case we skip writing this extension part
pub extension_relation_mappings: HashMap<String, Option<TermId>>,
// A map from the SO type of a transcript to the SO type of the gene it
// derives from
pub transcript_gene_so_term_map: HashMap<String, String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct FileExportConfig {
pub site_map_term_prefixes: Vec<RcString>,
pub site_map_reference_prefixes: Vec<RcString>,
#[serde(skip_serializing_if="Option::is_none")]
pub macromolecular_complexes: Option<MacromolecularComplexesConfig>,
#[serde(skip_serializing_if="Option::is_none")]
pub rnacentral: Option<RNAcentralConfig>,
pub annotation_subsets: Vec<AnnotationSubsetConfig>,
pub gpad_gpi: GpadGpiConfig,
// the reference to use for ND lines in GPAD/GAF output
pub nd_reference: String,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultVisAttrValueConfig {
pub termid: Option<RcString>,
pub name: RcString,
pub bin_start: Option<usize>,
pub bin_end: Option<usize>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultVisColumnConfig {
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub attr_values: Vec<GeneResultVisAttrValueConfig>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultsConfig {
pub field_config: HashMap<RcString, GeneResultVisColumnConfig>,
pub visualisation_field_names: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SlimConfig {
pub slim_display_name: RcString,
pub cv_name: RcString,
pub terms: Vec<TermAndName>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SeqFeaturePageConfig {
pub so_types_to_show: Vec<String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneExDatasetConfig {
pub name: RcString,
pub pubmed_id: RcString,
pub level_type_termid: RcString,
pub during_termid: RcString,
pub scale: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneExpressionConfig {
pub datasets: Vec<GeneExDatasetConfig>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct Config {
pub database_name: RcString,
pub database_long_name: RcString,
pub database_citation: RcString,
pub funder: RcString,
pub site_description: RcString,
pub load_organism_taxonid: Option<OrganismTaxonId>,
pub base_url: RcString,
pub helpdesk_address: RcString,
pub doc_page_aliases: HashMap<String, String>,
pub organisms: Vec<ConfigOrganism>,
pub api_seq_chunk_sizes: Vec<usize>,
pub sequence_feature_page: SeqFeaturePageConfig,
pub extension_display_names: Vec<ExtensionDisplayNames>,
pub extension_relation_order: RelationOrder,
pub evidence_types: HashMap<ShortEvidenceCode, EvidenceDetails>,
pub cv_config: HashMap<CvName, CvConfig>,
pub target_of_config: TargetOfConfig,
// when creating a TermShort struct, for each of these termids if the term has
// an "interesting parent" using the given rel_name, we store it in the
// interesting_parents field of the TermShort
pub interesting_parents: Vec<InterestingParent>,
pub viability_terms: ViabilityTerms,
// slim sets by slim name:
pub slims: HashMap<RcString, SlimConfig>,
pub reference_page_config: ReferencePageConfig,
pub interpro: InterPro,
pub server: ServerConfig,
pub extra_database_aliases: DatabaseAliases,
pub chromosomes: Vec<ChromosomeConfig>,
pub gene_results: GeneResultsConfig,
pub ortholog_taxonids: HashSet<u32>,
pub file_exports: FileExportConfig,
pub gene_expression: GeneExpressionConfig,
}
impl Config {
pub fn read(config_file_name: &str) -> Config {
let file = match File::open(config_file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", config_file_name, err)
}
};
let reader = BufReader::new(file);
match serde_json::from_reader(reader) {
Ok(config) => config,
Err(err) => {
panic!("failed to parse {}: {}", config_file_name, err)
},
}
}
pub fn cv_config_by_name(&self, cv_name: &str) -> CvConfig {
if let Some(config) = self.cv_config.get(cv_name) {
config.clone()
} else {
let empty_cv_config =
CvConfig {
feature_type: "".into(),
display_name: Some("".into()),
single_or_multi_locus: SingleOrMultiLocusConfig::NotApplicable,
filters: vec![],
split_by_parents: vec![],
summary_relations_to_hide: vec![],
summary_relation_ranges_to_collect: vec![],
sort_details_by: None,
source_config: HashMap::new(),
};
if cv_name.starts_with("extension:") {
if cv_name.ends_with(":gene") {
CvConfig {
feature_type: "gene".into(),
..empty_cv_config
}
} else {
CvConfig {
feature_type: "genotype".into(),
..empty_cv_config
}
}
} else {
CvConfig {
feature_type: "gene".into(),
..empty_cv_config
}
}
}
}
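// Illustrative examples (CV names assumed, not from the original source):
// "extension:during:gene" falls through to the gene-feature config above,
// while "extension:has_severity" (no ":gene" suffix) gets the genotype config.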
pub fn organism_by_taxonid(&self, lookup_taxonid: u32) -> Option<ConfigOrganism> {
for org in &self.organisms {
if org.taxonid == lookup_taxonid {
return Some(org.clone());
}
}
None
}
pub fn
|
(&self) -> Option<ConfigOrganism> {
if let Some(load_organism_taxonid) = self.load_organism_taxonid {
let org = self.organism_by_taxonid(load_organism_taxonid);
if org.is_none() {
panic!("can't find configuration for load_organism_taxonid: {}",
load_organism_taxonid);
}
org
} else {
None
}
}
pub fn find_chromosome_config<'a>(&'a self, chromosome_name: &str)
-> &'a ChromosomeConfig
{
for chr_config in &self.chromosomes {
if chr_config.name == chromosome_name {
return chr_config;
}
}
panic!("can't find chromosome configuration for {}", &chromosome_name);
}
}
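// Illustrative sketch (not part of the original source; the file name is an
// assumed example):
//
//     let config = Config::read("website_config.json");
//     if let Some(org) = config.load_organism() {
//         println!("loading data for {}", org.scientific_name());
//     }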
pub const POMBASE_ANN_EXT_TERM_CV_NAME: &str = "PomBase annotation extension terms";
pub const ANNOTATION_EXT_REL_PREFIX: &str = "annotation_extension_relation-";
pub enum FeatureRelAnnotationType {
Interaction,
Ortholog,
Paralog,
}
pub struct FeatureRelConfig {
pub rel_type_name: &'static str,
pub annotation_type: FeatureRelAnnotationType,
}
pub const FEATURE_REL_CONFIGS: [FeatureRelConfig; 4] =
[
FeatureRelConfig {
rel_type_name: "interacts_physically",
annotation_type: FeatureRelAnnotationType::Interaction,
},
FeatureRelConfig {
rel_type_name: "interacts_genetically",
annotation_type: FeatureRelAnnotationType::Interaction,
},
FeatureRelConfig {
rel_type_name: "orthologous_to",
annotation_type: FeatureRelAnnotationType::Ortholog,
},
FeatureRelConfig {
rel_type_name: "paralogous_to",
annotation_type: FeatureRelAnnotationType::Paralog,
},
];
// relations to use when copying annotation to parents (i.e. adding the
// annotation of child terms to parents)
pub const DESCENDANT_REL_NAMES: [&str; 7] =
["is_a", "part_of", "regulates", "positively_regulates", "negatively_regulates",
"has_part", "output_of"];
// only consider has_part relations for these ontologies:
pub const HAS_PART_CV_NAMES: [&str; 1] = ["fission_yeast_phenotype"];
// number of genes before (and after) to add to the gene_neighbourhood field
pub const GENE_NEIGHBOURHOOD_DISTANCE: usize = 5;
pub const TRANSCRIPT_FEATURE_TYPES: [&str; 8] =
["snRNA", "rRNA", "mRNA", "snoRNA", "ncRNA", "tRNA", "pseudogenic_transcript",
"transcript"];
pub const TRANSCRIPT_PART_TYPES: [&str; 4] =
["five_prime_UTR", "exon", "pseudogenic_exon", "three_prime_UTR"];
// any feature with a type not in this list or in the two TRANSCRIPT lists above
// will be stored in the other_features map
pub const HANDLED_FEATURE_TYPES: [&str; 7] =
["gene", "pseudogene", "intron", "genotype", "allele", "chromosome", "polypeptide"];
#[derive(Deserialize, Clone, Debug)]
pub struct DocConfig {
pub pages: BTreeMap<RcString, RcString>,
}
impl DocConfig {
pub fn read(doc_config_file_name: &str) -> DocConfig {
let file = match File::open(doc_config_file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", doc_config_file_name, err)
}
};
let reader = BufReader::new(file);
match serde_json::from_reader(reader) {
Ok(config) => config,
Err(err) => {
panic!("failed to parse {}: {}", doc_config_file_name, err)
},
}
}
}
pub struct GoEcoMapping {
mapping: HashMap<(String, String), String>,
}
impl GoEcoMapping {
pub fn read(file_name: &str) -> Result<GoEcoMapping, std::io::Error> {
let file = match File::open(file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", file_name, err)
}
};
let reader = BufReader::new(file);
let mut mapping = HashMap::new();
for line_result in reader.lines() {
match line_result {
Ok(line) => {
if line.starts_with('#') {
continue;
}
let parts: Vec<&str> = line.split('\t').collect();
mapping.insert((String::from(parts[0]), String::from(parts[1])),
String::from(parts[2]));
},
Err(err) => return Err(err)
};
}
Ok(GoEcoMapping {
mapping
})
}
pub fn lookup_default(&self, go_evidence_code: &str) -> Option<String> {
self.mapping.get(&(String::from(go_evidence_code), String::from("Default")))
.map(String::from)
}
pub fn lookup_with_go_ref(&self, go_evidence_code: &str, go_ref: &str)
-> Option<String>
{
self.mapping.get(&(String::from(go_evidence_code), String::from(go_ref)))
.map(String::from)
}
}
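// Illustrative sketch (not part of the original source; the mapping file name,
// evidence code and GO_REF are assumed examples):
//
//     let eco_map = GoEcoMapping::read("gaf-eco-mapping.txt")?;
//     // prefer a GO_REF-specific mapping, fall back to the "Default" column
//     let eco = eco_map.lookup_with_go_ref("IMP", "GO_REF:0000024")
//         .or_else(|| eco_map.lookup_default("IMP"));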
|
load_organism
|
identifier_name
|
config.rs
|
use std::collections::{HashMap, HashSet, BTreeMap};
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use crate::types::*;
use serde_json;
use pombase_rc_string::RcString;
// configuration for extension display names and for the "Target of" section
#[derive(Deserialize, Clone, Debug)]
pub struct ExtensionDisplayNames {
pub rel_name: RcString, // name of extension relation
pub display_name: RcString, // text to display
pub if_descendant_of: Option<RcString>, // None if applies to any extension
pub reciprocal_display: Option<RcString>, // None if reciprocal shouldn't be displayed
}
// "interesting parents" are those stored in the JSON in the TermShort structs
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct InterestingParent {
pub termid: RcString,
pub rel_name: RcString,
}
// the order of relations within an extension:
#[derive(Deserialize, Clone, Debug)]
pub struct RelationOrder {
// put the relations in this order in the displayed extensions:
pub relation_order: Vec<RcString>,
// except for these relations, which should always come last:
pub always_last: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct AncestorFilterCategory {
pub display_name: RcString,
// this category matches these terms and their descendants
pub ancestors: Vec<TermId>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct FilterConfig {
pub filter_name: String,
pub display_name: String,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub term_categories: Vec<AncestorFilterCategory>,
#[serde(skip_serializing_if="Option::is_none", default)]
pub slim_name: Option<RcString>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub extension_categories: Vec<AncestorFilterCategory>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SplitByParentsConfig {
pub termids: Vec<RcString>,
pub display_name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ChromosomeConfig {
pub name: RcString,
// string to use for this chromosome in a file name, eg. "chromosome_II"
// or "mitochondrial_chromosome"
pub export_file_id: RcString,
// string to use within files, eg. "II" or "mitochondrial"
pub export_id: RcString,
// eg. "Chromosome II" or "Mitochondrial chromosome"
pub long_display_name: RcString,
// eg. "II" or "Mitochondrial"
pub short_display_name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct CvSourceConfig {
// a type name for the cvtermprop to display to the user
pub display_name_prop: Option<RcString>,
// the cvtermprop type name for the ID used for linking
// or "ACCESSION" if the accession ID of the term should be used
pub id_source: Option<RcString>,
}
pub type TargetRelationName = String;
#[derive(Deserialize, Clone, Debug)]
pub struct TargetOfConfig {
// these priorities are used to order the list in the "Target of" section
// and to filter the "Target of" summary
// https://github.com/pombase/website/issues/299
pub relation_priority: HashMap<TargetRelationName, u32>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct CvConfig {
pub feature_type: RcString,
pub display_name: Option<RcString>,
// filtering configured per CV
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub filters: Vec<FilterConfig>,
// config for splitting cv annotation tables into sub-sections
// based on ancestry
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub split_by_parents: Vec<SplitByParentsConfig>,
// relations to not show in the summary
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub summary_relations_to_hide: Vec<RcString>,
// relations where the range is a gene ID to display like:
// has substrate pom1, cdc1 involved in negative regulation of ...
// rather than as two lines
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub summary_relation_ranges_to_collect: Vec<RcString>,
#[serde(default="SingleOrMultiLocusConfig::not_applicable")]
pub single_or_multi_locus: SingleOrMultiLocusConfig,
// the field to sort by
#[serde(skip_serializing_if="Option::is_none")]
pub sort_details_by: Option<Vec<RcString>>,
// This is the configuration for the "Source" column, a map from
// source name to config
// See Disease association for an example. If there is no config,
// no Source column will be displayed
#[serde(skip_serializing_if="HashMap::is_empty", default)]
pub source_config: HashMap<RcString, CvSourceConfig>,
}
pub type ShortEvidenceCode = RcString;
pub type LongEvidenceCode = RcString;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct ConfigOrganism {
pub taxonid: OrganismTaxonId,
pub genus: RcString,
pub species: RcString,
pub alternative_names: Vec<RcString>,
pub assembly_version: Option<RcString>,
}
impl ConfigOrganism {
pub fn full_name(&self) -> String {
self.genus.clone() + "_" + self.species.as_str()
}
pub fn scientific_name(&self) -> String {
self.genus.clone() + " " + self.species.as_str()
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct ViabilityTerms {
pub viable: RcString,
pub inviable: RcString,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TermAndName {
pub termid: RcString,
pub name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ReferencePageConfig {
pub triage_status_to_ignore: Vec<String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct InterPro {
pub dbnames_to_filter: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ServerSubsetConfig {
pub prefixes_to_remove: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ServerConfig {
pub subsets: ServerSubsetConfig,
pub solr_url: String,
pub close_synonym_boost: f32,
pub distant_synonym_boost: f32,
pub term_definition_boost: f32,
pub django_url: String,
pub cv_name_for_terms_search: String,
pub gene_uniquename_re: String,
}
#[derive(Deserialize, Clone, Debug)]
pub struct EvidenceDetails {
pub long: LongEvidenceCode,
|
#[derive(Deserialize, Clone, Debug)]
pub struct MacromolecularComplexesConfig {
pub parent_complex_termid: RcString,
pub excluded_terms: HashSet<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct RNAcentralConfig {
// SO termids of RNA features to export
pub export_so_ids: HashSet<RcString>,
}
#[derive(Deserialize, Clone, Debug, PartialEq)]
pub enum SingleOrMultiLocusConfig {
#[serde(rename = "single")]
Single,
#[serde(rename = "multi")]
Multi,
#[serde(rename = "na")]
NotApplicable
}
impl SingleOrMultiLocusConfig {
pub fn not_applicable() -> SingleOrMultiLocusConfig {
SingleOrMultiLocusConfig::NotApplicable
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct ExportColumnConfig {
pub name: RcString,
pub display_name: RcString
}
#[derive(Deserialize, Clone, Debug)]
pub struct AnnotationSubsetConfig {
pub term_ids: Vec<TermId>,
pub file_name: RcString,
pub columns: Vec<ExportColumnConfig>,
#[serde(default="SingleOrMultiLocusConfig::not_applicable")]
pub single_or_multi_locus: SingleOrMultiLocusConfig,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GpadGpiConfig {
// the term IDs of the three GO aspects
pub go_aspect_terms: HashMap<String, TermId>,
// Map a relation term name to a term ID, unless the term ID is None in
// which case we skip writing this extension part
pub extension_relation_mappings: HashMap<String, Option<TermId>>,
// A map from the SO type of a transcript to the SO type of the gene it
// derives from
pub transcript_gene_so_term_map: HashMap<String, String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct FileExportConfig {
pub site_map_term_prefixes: Vec<RcString>,
pub site_map_reference_prefixes: Vec<RcString>,
#[serde(skip_serializing_if="Option::is_none")]
pub macromolecular_complexes: Option<MacromolecularComplexesConfig>,
#[serde(skip_serializing_if="Option::is_none")]
pub rnacentral: Option<RNAcentralConfig>,
pub annotation_subsets: Vec<AnnotationSubsetConfig>,
pub gpad_gpi: GpadGpiConfig,
// the reference to use for ND lines in GPAD/GAF output
pub nd_reference: String,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultVisAttrValueConfig {
pub termid: Option<RcString>,
pub name: RcString,
pub bin_start: Option<usize>,
pub bin_end: Option<usize>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultVisColumnConfig {
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub attr_values: Vec<GeneResultVisAttrValueConfig>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultsConfig {
pub field_config: HashMap<RcString, GeneResultVisColumnConfig>,
pub visualisation_field_names: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SlimConfig {
pub slim_display_name: RcString,
pub cv_name: RcString,
pub terms: Vec<TermAndName>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SeqFeaturePageConfig {
pub so_types_to_show: Vec<String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneExDatasetConfig {
pub name: RcString,
pub pubmed_id: RcString,
pub level_type_termid: RcString,
pub during_termid: RcString,
pub scale: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneExpressionConfig {
pub datasets: Vec<GeneExDatasetConfig>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct Config {
pub database_name: RcString,
pub database_long_name: RcString,
pub database_citation: RcString,
pub funder: RcString,
pub site_description: RcString,
pub load_organism_taxonid: Option<OrganismTaxonId>,
pub base_url: RcString,
pub helpdesk_address: RcString,
pub doc_page_aliases: HashMap<String, String>,
pub organisms: Vec<ConfigOrganism>,
pub api_seq_chunk_sizes: Vec<usize>,
pub sequence_feature_page: SeqFeaturePageConfig,
pub extension_display_names: Vec<ExtensionDisplayNames>,
pub extension_relation_order: RelationOrder,
pub evidence_types: HashMap<ShortEvidenceCode, EvidenceDetails>,
pub cv_config: HashMap<CvName, CvConfig>,
pub target_of_config: TargetOfConfig,
// when creating a TermShort struct, for each of these termids if the term has
// an "interesting parent" using the given rel_name, we store it in the
// interesting_parents field of the TermShort
pub interesting_parents: Vec<InterestingParent>,
pub viability_terms: ViabilityTerms,
// slim sets by slim name:
pub slims: HashMap<RcString, SlimConfig>,
pub reference_page_config: ReferencePageConfig,
pub interpro: InterPro,
pub server: ServerConfig,
pub extra_database_aliases: DatabaseAliases,
pub chromosomes: Vec<ChromosomeConfig>,
pub gene_results: GeneResultsConfig,
pub ortholog_taxonids: HashSet<u32>,
pub file_exports: FileExportConfig,
pub gene_expression: GeneExpressionConfig,
}
impl Config {
pub fn read(config_file_name: &str) -> Config {
let file = match File::open(config_file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", config_file_name, err)
}
};
let reader = BufReader::new(file);
match serde_json::from_reader(reader) {
Ok(config) => config,
Err(err) => {
panic!("failed to parse {}: {}", config_file_name, err)
},
}
}
pub fn cv_config_by_name(&self, cv_name: &str) -> CvConfig {
if let Some(config) = self.cv_config.get(cv_name) {
config.clone()
} else {
let empty_cv_config =
CvConfig {
feature_type: "".into(),
display_name: Some("".into()),
single_or_multi_locus: SingleOrMultiLocusConfig::NotApplicable,
filters: vec![],
split_by_parents: vec![],
summary_relations_to_hide: vec![],
summary_relation_ranges_to_collect: vec![],
sort_details_by: None,
source_config: HashMap::new(),
};
if cv_name.starts_with("extension:") {
if cv_name.ends_with(":gene") {
CvConfig {
feature_type: "gene".into(),
..empty_cv_config
}
} else {
CvConfig {
feature_type: "genotype".into(),
..empty_cv_config
}
}
} else {
CvConfig {
feature_type: "gene".into(),
..empty_cv_config
}
}
}
}
pub fn organism_by_taxonid(&self, lookup_taxonid: u32) -> Option<ConfigOrganism> {
for org in &self.organisms {
if org.taxonid == lookup_taxonid {
return Some(org.clone());
}
}
None
}
pub fn load_organism(&self) -> Option<ConfigOrganism> {
if let Some(load_organism_taxonid) = self.load_organism_taxonid {
let org = self.organism_by_taxonid(load_organism_taxonid);
if org.is_none() {
panic!("can't find configuration for load_organism_taxonid: {}",
load_organism_taxonid);
}
org
} else {
None
}
}
pub fn find_chromosome_config<'a>(&'a self, chromosome_name: &str)
-> &'a ChromosomeConfig
{
for chr_config in &self.chromosomes {
if chr_config.name == chromosome_name {
return chr_config;
}
}
panic!("can't find chromosome configuration for {}", &chromosome_name);
}
}
pub const POMBASE_ANN_EXT_TERM_CV_NAME: &str = "PomBase annotation extension terms";
pub const ANNOTATION_EXT_REL_PREFIX: &str = "annotation_extension_relation-";
pub enum FeatureRelAnnotationType {
Interaction,
Ortholog,
Paralog,
}
pub struct FeatureRelConfig {
pub rel_type_name: &'static str,
pub annotation_type: FeatureRelAnnotationType,
}
pub const FEATURE_REL_CONFIGS: [FeatureRelConfig; 4] =
[
FeatureRelConfig {
rel_type_name: "interacts_physically",
annotation_type: FeatureRelAnnotationType::Interaction,
},
FeatureRelConfig {
rel_type_name: "interacts_genetically",
annotation_type: FeatureRelAnnotationType::Interaction,
},
FeatureRelConfig {
rel_type_name: "orthologous_to",
annotation_type: FeatureRelAnnotationType::Ortholog,
},
FeatureRelConfig {
rel_type_name: "paralogous_to",
annotation_type: FeatureRelAnnotationType::Paralog,
},
];
// relations to use when copying annotation to parents (i.e. adding the
// annotation of child terms to parents)
pub const DESCENDANT_REL_NAMES: [&str; 7] =
["is_a", "part_of", "regulates", "positively_regulates", "negatively_regulates",
"has_part", "output_of"];
// only consider has_part relations for these ontologies:
pub const HAS_PART_CV_NAMES: [&str; 1] = ["fission_yeast_phenotype"];
// number of genes before (and after) to add to the gene_neighbourhood field
pub const GENE_NEIGHBOURHOOD_DISTANCE: usize = 5;
pub const TRANSCRIPT_FEATURE_TYPES: [&str; 8] =
["snRNA", "rRNA", "mRNA", "snoRNA", "ncRNA", "tRNA", "pseudogenic_transcript",
"transcript"];
pub const TRANSCRIPT_PART_TYPES: [&str; 4] =
["five_prime_UTR", "exon", "pseudogenic_exon", "three_prime_UTR"];
// any feature with a type not in this list or in the two TRANSCRIPT lists above
// will be stored in the other_features map
pub const HANDLED_FEATURE_TYPES: [&str; 7] =
["gene", "pseudogene", "intron", "genotype", "allele", "chromosome", "polypeptide"];
#[derive(Deserialize, Clone, Debug)]
pub struct DocConfig {
pub pages: BTreeMap<RcString, RcString>,
}
impl DocConfig {
pub fn read(doc_config_file_name: &str) -> DocConfig {
let file = match File::open(doc_config_file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", doc_config_file_name, err)
}
};
let reader = BufReader::new(file);
match serde_json::from_reader(reader) {
Ok(config) => config,
Err(err) => {
panic!("failed to parse {}: {}", doc_config_file_name, err)
},
}
}
}
pub struct GoEcoMapping {
mapping: HashMap<(String, String), String>,
}
impl GoEcoMapping {
pub fn read(file_name: &str) -> Result<GoEcoMapping, std::io::Error> {
let file = match File::open(file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", file_name, err)
}
};
let reader = BufReader::new(file);
let mut mapping = HashMap::new();
for line_result in reader.lines() {
match line_result {
Ok(line) => {
if line.starts_with('#') {
continue;
}
let parts: Vec<&str> = line.split('\t').collect();
mapping.insert((String::from(parts[0]), String::from(parts[1])),
String::from(parts[2]));
},
Err(err) => return Err(err)
};
}
Ok(GoEcoMapping {
mapping
})
}
pub fn lookup_default(&self, go_evidence_code: &str) -> Option<String> {
self.mapping.get(&(String::from(go_evidence_code), String::from("Default")))
.map(String::from)
}
pub fn lookup_with_go_ref(&self, go_evidence_code: &str, go_ref: &str)
-> Option<String>
{
self.mapping.get(&(String::from(go_evidence_code), String::from(go_ref)))
.map(String::from)
}
}
|
pub link: Option<RcString>,
}
pub type DatabaseName = RcString;
pub type DatabaseAliases = HashMap<DatabaseName, DatabaseName>;
|
random_line_split
|
config.rs
|
use std::collections::{HashMap, HashSet, BTreeMap};
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use crate::types::*;
use serde_json;
use pombase_rc_string::RcString;
// configuration for extension display names and for the "Target of" section
#[derive(Deserialize, Clone, Debug)]
pub struct ExtensionDisplayNames {
pub rel_name: RcString, // name of extension relation
pub display_name: RcString, // text to display
pub if_descendant_of: Option<RcString>, // None if applies to any extension
pub reciprocal_display: Option<RcString>, // None if reciprocal shouldn't be displayed
}
// "interesting parents" are those stored in the JSON in the TermShort structs
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct InterestingParent {
pub termid: RcString,
pub rel_name: RcString,
}
// the order of relations within an extension:
#[derive(Deserialize, Clone, Debug)]
pub struct RelationOrder {
// put the relations in this order in the displayed extensions:
pub relation_order: Vec<RcString>,
// except for these relations, which should always come last:
pub always_last: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct AncestorFilterCategory {
pub display_name: RcString,
// this category matches these terms and their descendants
pub ancestors: Vec<TermId>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct FilterConfig {
pub filter_name: String,
pub display_name: String,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub term_categories: Vec<AncestorFilterCategory>,
#[serde(skip_serializing_if="Option::is_none", default)]
pub slim_name: Option<RcString>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub extension_categories: Vec<AncestorFilterCategory>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SplitByParentsConfig {
pub termids: Vec<RcString>,
pub display_name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ChromosomeConfig {
pub name: RcString,
// string to use for this chromosome in a file name, eg. "chromosome_II"
// or "mitochondrial_chromosome"
pub export_file_id: RcString,
// string to use within files, eg. "II" or "mitochondrial"
pub export_id: RcString,
// eg. "Chromosome II" or "Mitochondrial chromosome"
pub long_display_name: RcString,
// eg. "II" or "Mitochondrial"
pub short_display_name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct CvSourceConfig {
// a type name for the cvtermprop to display to the user
pub display_name_prop: Option<RcString>,
// the cvtermprop type name for the ID used for linking
// or "ACCESSION" if the accession ID of the term should be used
pub id_source: Option<RcString>,
}
pub type TargetRelationName = String;
#[derive(Deserialize, Clone, Debug)]
pub struct TargetOfConfig {
// these priorities are used to order the list in the "Target of" section
// and to filter the "Target of" summary
// https://github.com/pombase/website/issues/299
pub relation_priority: HashMap<TargetRelationName, u32>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct CvConfig {
pub feature_type: RcString,
pub display_name: Option<RcString>,
// filtering configured per CV
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub filters: Vec<FilterConfig>,
// config for splitting cv annotation tables into sub-sections
// based on ancestry
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub split_by_parents: Vec<SplitByParentsConfig>,
// relations to not show in the summary
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub summary_relations_to_hide: Vec<RcString>,
// relations where the range is a gene ID to display like:
// has substrate pom1, cdc1 involved in negative regulation of ...
// rather than as two lines
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub summary_relation_ranges_to_collect: Vec<RcString>,
#[serde(default="SingleOrMultiLocusConfig::not_applicable")]
pub single_or_multi_locus: SingleOrMultiLocusConfig,
// the field to sort by
#[serde(skip_serializing_if="Option::is_none")]
pub sort_details_by: Option<Vec<RcString>>,
// This is the configuration for the "Source" column, a map from
// source name to config
// See Disease association for an example. If there is no config,
// no Source column will be displayed
#[serde(skip_serializing_if="HashMap::is_empty", default)]
pub source_config: HashMap<RcString, CvSourceConfig>,
}
pub type ShortEvidenceCode = RcString;
pub type LongEvidenceCode = RcString;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct ConfigOrganism {
pub taxonid: OrganismTaxonId,
pub genus: RcString,
pub species: RcString,
pub alternative_names: Vec<RcString>,
pub assembly_version: Option<RcString>,
}
impl ConfigOrganism {
pub fn full_name(&self) -> String {
self.genus.clone() + "_" + self.species.as_str()
}
pub fn scientific_name(&self) -> String {
self.genus.clone() + " " + self.species.as_str()
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct ViabilityTerms {
pub viable: RcString,
pub inviable: RcString,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TermAndName {
pub termid: RcString,
pub name: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ReferencePageConfig {
pub triage_status_to_ignore: Vec<String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct InterPro {
pub dbnames_to_filter: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ServerSubsetConfig {
pub prefixes_to_remove: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct ServerConfig {
pub subsets: ServerSubsetConfig,
pub solr_url: String,
pub close_synonym_boost: f32,
pub distant_synonym_boost: f32,
pub term_definition_boost: f32,
pub django_url: String,
pub cv_name_for_terms_search: String,
pub gene_uniquename_re: String,
}
#[derive(Deserialize, Clone, Debug)]
pub struct EvidenceDetails {
pub long: LongEvidenceCode,
pub link: Option<RcString>,
}
pub type DatabaseName = RcString;
pub type DatabaseAliases = HashMap<DatabaseName, DatabaseName>;
#[derive(Deserialize, Clone, Debug)]
pub struct MacromolecularComplexesConfig {
pub parent_complex_termid: RcString,
pub excluded_terms: HashSet<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct RNAcentralConfig {
// SO termids of RNA features to export
pub export_so_ids: HashSet<RcString>,
}
#[derive(Deserialize, Clone, Debug, PartialEq)]
pub enum SingleOrMultiLocusConfig {
#[serde(rename = "single")]
Single,
#[serde(rename = "multi")]
Multi,
#[serde(rename = "na")]
NotApplicable
}
impl SingleOrMultiLocusConfig {
pub fn not_applicable() -> SingleOrMultiLocusConfig {
SingleOrMultiLocusConfig::NotApplicable
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct ExportColumnConfig {
pub name: RcString,
pub display_name: RcString
}
#[derive(Deserialize, Clone, Debug)]
pub struct AnnotationSubsetConfig {
pub term_ids: Vec<TermId>,
pub file_name: RcString,
pub columns: Vec<ExportColumnConfig>,
#[serde(default="SingleOrMultiLocusConfig::not_applicable")]
pub single_or_multi_locus: SingleOrMultiLocusConfig,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GpadGpiConfig {
// the term IDs of the three GO aspects
pub go_aspect_terms: HashMap<String, TermId>,
// Map a relation term name to a term ID, unless the term ID is None in
// which case we skip writing this extension part
pub extension_relation_mappings: HashMap<String, Option<TermId>>,
// A map from the SO type of a transcript to the SO type of the gene it
// derives from
pub transcript_gene_so_term_map: HashMap<String, String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct FileExportConfig {
pub site_map_term_prefixes: Vec<RcString>,
pub site_map_reference_prefixes: Vec<RcString>,
#[serde(skip_serializing_if="Option::is_none")]
pub macromolecular_complexes: Option<MacromolecularComplexesConfig>,
#[serde(skip_serializing_if="Option::is_none")]
pub rnacentral: Option<RNAcentralConfig>,
pub annotation_subsets: Vec<AnnotationSubsetConfig>,
pub gpad_gpi: GpadGpiConfig,
// the reference to use for ND lines in GPAD/GAF output
pub nd_reference: String,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultVisAttrValueConfig {
pub termid: Option<RcString>,
pub name: RcString,
pub bin_start: Option<usize>,
pub bin_end: Option<usize>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultVisColumnConfig {
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub attr_values: Vec<GeneResultVisAttrValueConfig>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneResultsConfig {
pub field_config: HashMap<RcString, GeneResultVisColumnConfig>,
pub visualisation_field_names: Vec<RcString>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SlimConfig {
pub slim_display_name: RcString,
pub cv_name: RcString,
pub terms: Vec<TermAndName>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct SeqFeaturePageConfig {
pub so_types_to_show: Vec<String>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneExDatasetConfig {
pub name: RcString,
pub pubmed_id: RcString,
pub level_type_termid: RcString,
pub during_termid: RcString,
pub scale: RcString,
}
#[derive(Deserialize, Clone, Debug)]
pub struct GeneExpressionConfig {
pub datasets: Vec<GeneExDatasetConfig>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct Config {
pub database_name: RcString,
pub database_long_name: RcString,
pub database_citation: RcString,
pub funder: RcString,
pub site_description: RcString,
pub load_organism_taxonid: Option<OrganismTaxonId>,
pub base_url: RcString,
pub helpdesk_address: RcString,
pub doc_page_aliases: HashMap<String, String>,
pub organisms: Vec<ConfigOrganism>,
pub api_seq_chunk_sizes: Vec<usize>,
pub sequence_feature_page: SeqFeaturePageConfig,
pub extension_display_names: Vec<ExtensionDisplayNames>,
pub extension_relation_order: RelationOrder,
pub evidence_types: HashMap<ShortEvidenceCode, EvidenceDetails>,
pub cv_config: HashMap<CvName, CvConfig>,
pub target_of_config: TargetOfConfig,
// when creating a TermShort struct, for each of these termids if the term has
// an "interesting parent" using the given rel_name, we store it in the
// interesting_parents field of the TermShort
pub interesting_parents: Vec<InterestingParent>,
pub viability_terms: ViabilityTerms,
// slim sets by slim name:
pub slims: HashMap<RcString, SlimConfig>,
pub reference_page_config: ReferencePageConfig,
pub interpro: InterPro,
pub server: ServerConfig,
pub extra_database_aliases: DatabaseAliases,
pub chromosomes: Vec<ChromosomeConfig>,
pub gene_results: GeneResultsConfig,
pub ortholog_taxonids: HashSet<u32>,
pub file_exports: FileExportConfig,
pub gene_expression: GeneExpressionConfig,
}
impl Config {
pub fn read(config_file_name: &str) -> Config {
let file = match File::open(config_file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", config_file_name, err)
}
};
let reader = BufReader::new(file);
match serde_json::from_reader(reader) {
Ok(config) => config,
Err(err) => {
panic!("failed to parse {}: {}", config_file_name, err)
},
}
}
pub fn cv_config_by_name(&self, cv_name: &str) -> CvConfig {
if let Some(config) = self.cv_config.get(cv_name) {
config.clone()
} else {
let empty_cv_config =
CvConfig {
feature_type: "".into(),
display_name: Some("".into()),
single_or_multi_locus: SingleOrMultiLocusConfig::NotApplicable,
filters: vec![],
split_by_parents: vec![],
summary_relations_to_hide: vec![],
summary_relation_ranges_to_collect: vec![],
sort_details_by: None,
source_config: HashMap::new(),
};
if cv_name.starts_with("extension:") {
if cv_name.ends_with(":gene") {
CvConfig {
feature_type: "gene".into(),
..empty_cv_config
}
} else
|
} else {
CvConfig {
feature_type: "gene".into(),
..empty_cv_config
}
}
}
}
pub fn organism_by_taxonid(&self, lookup_taxonid: u32) -> Option<ConfigOrganism> {
for org in &self.organisms {
if org.taxonid == lookup_taxonid {
return Some(org.clone());
}
}
None
}
pub fn load_organism(&self) -> Option<ConfigOrganism> {
if let Some(load_organism_taxonid) = self.load_organism_taxonid {
let org = self.organism_by_taxonid(load_organism_taxonid);
if org.is_none() {
panic!("can't find configuration for load_organism_taxonid: {}",
load_organism_taxonid);
}
org
} else {
None
}
}
pub fn find_chromosome_config<'a>(&'a self, chromosome_name: &str)
-> &'a ChromosomeConfig
{
for chr_config in &self.chromosomes {
if chr_config.name == chromosome_name {
return chr_config;
}
}
panic!("can't find chromosome configuration for {}", &chromosome_name);
}
}
pub const POMBASE_ANN_EXT_TERM_CV_NAME: &str = "PomBase annotation extension terms";
pub const ANNOTATION_EXT_REL_PREFIX: &str = "annotation_extension_relation-";
pub enum FeatureRelAnnotationType {
Interaction,
Ortholog,
Paralog,
}
pub struct FeatureRelConfig {
pub rel_type_name: &'static str,
pub annotation_type: FeatureRelAnnotationType,
}
pub const FEATURE_REL_CONFIGS: [FeatureRelConfig; 4] =
[
FeatureRelConfig {
rel_type_name: "interacts_physically",
annotation_type: FeatureRelAnnotationType::Interaction,
},
FeatureRelConfig {
rel_type_name: "interacts_genetically",
annotation_type: FeatureRelAnnotationType::Interaction,
},
FeatureRelConfig {
rel_type_name: "orthologous_to",
annotation_type: FeatureRelAnnotationType::Ortholog,
},
FeatureRelConfig {
rel_type_name: "paralogous_to",
annotation_type: FeatureRelAnnotationType::Paralog,
},
];
// relations to use when copying annotation to parents (i.e. adding the
// annotation of child terms to parents)
pub const DESCENDANT_REL_NAMES: [&str; 7] =
["is_a", "part_of", "regulates", "positively_regulates", "negatively_regulates",
"has_part", "output_of"];
// only consider has_part relations for these ontologies:
pub const HAS_PART_CV_NAMES: [&str; 1] = ["fission_yeast_phenotype"];
// number of genes before (and after) to add to the gene_neighbourhood field
pub const GENE_NEIGHBOURHOOD_DISTANCE: usize = 5;
pub const TRANSCRIPT_FEATURE_TYPES: [&str; 8] =
["snRNA", "rRNA", "mRNA", "snoRNA", "ncRNA", "tRNA", "pseudogenic_transcript",
"transcript"];
pub const TRANSCRIPT_PART_TYPES: [&str; 4] =
["five_prime_UTR", "exon", "pseudogenic_exon", "three_prime_UTR"];
// any feature with a type not in this list or in the two TRANSCRIPT lists above
// will be stored in the other_features map
pub const HANDLED_FEATURE_TYPES: [&str; 7] =
["gene", "pseudogene", "intron", "genotype", "allele", "chromosome", "polypeptide"];
#[derive(Deserialize, Clone, Debug)]
pub struct DocConfig {
pub pages: BTreeMap<RcString, RcString>,
}
impl DocConfig {
pub fn read(doc_config_file_name: &str) -> DocConfig {
let file = match File::open(doc_config_file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", doc_config_file_name, err)
}
};
let reader = BufReader::new(file);
match serde_json::from_reader(reader) {
Ok(config) => config,
Err(err) => {
panic!("failed to parse {}: {}", doc_config_file_name, err)
},
}
}
}
pub struct GoEcoMapping {
mapping: HashMap<(String, String), String>,
}
impl GoEcoMapping {
pub fn read(file_name: &str) -> Result<GoEcoMapping, std::io::Error> {
let file = match File::open(file_name) {
Ok(file) => file,
Err(err) => {
panic!("Failed to read {}: {}\n", file_name, err)
}
};
let reader = BufReader::new(file);
let mut mapping = HashMap::new();
for line_result in reader.lines() {
match line_result {
Ok(line) => {
if line.starts_with('#') {
continue;
}
let parts: Vec<&str> = line.split('\t').collect();
mapping.insert((String::from(parts[0]), String::from(parts[1])),
String::from(parts[2]));
},
Err(err) => return Err(err)
};
}
Ok(GoEcoMapping {
mapping
})
}
pub fn lookup_default(&self, go_evidence_code: &str) -> Option<String> {
self.mapping.get(&(String::from(go_evidence_code), String::from("Default")))
.map(String::from)
}
pub fn lookup_with_go_ref(&self, go_evidence_code: &str, go_ref: &str)
-> Option<String>
{
self.mapping.get(&(String::from(go_evidence_code), String::from(go_ref)))
.map(String::from)
}
}
|
{
CvConfig {
feature_type: "genotype".into(),
..empty_cv_config
}
}
|
conditional_block
|
__init__.py
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.api.factories import viewset_factory
from shoop.core.api.orders import OrderViewSet
from shoop.core.api.products import ProductViewSet, ShopProductViewSet
from shoop.core.models import Contact, Shop
from shoop.core.models.categories import Category
def populate_core_api(router):
|
"""
:param router: Router
:type router: rest_framework.routers.DefaultRouter
"""
router.register("shoop/category", viewset_factory(Category))
router.register("shoop/contact", viewset_factory(Contact))
router.register("shoop/order", OrderViewSet)
router.register("shoop/product", ProductViewSet)
router.register("shoop/shop", viewset_factory(Shop))
router.register("shoop/shop_product", ShopProductViewSet)
|
identifier_body
|
|
__init__.py
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.api.factories import viewset_factory
from shoop.core.api.orders import OrderViewSet
from shoop.core.api.products import ProductViewSet, ShopProductViewSet
|
from shoop.core.models.categories import Category
def populate_core_api(router):
"""
:param router: Router
:type router: rest_framework.routers.DefaultRouter
"""
router.register("shoop/category", viewset_factory(Category))
router.register("shoop/contact", viewset_factory(Contact))
router.register("shoop/order", OrderViewSet)
router.register("shoop/product", ProductViewSet)
router.register("shoop/shop", viewset_factory(Shop))
router.register("shoop/shop_product", ShopProductViewSet)
|
from shoop.core.models import Contact, Shop
|
random_line_split
|
__init__.py
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.api.factories import viewset_factory
from shoop.core.api.orders import OrderViewSet
from shoop.core.api.products import ProductViewSet, ShopProductViewSet
from shoop.core.models import Contact, Shop
from shoop.core.models.categories import Category
def
|
(router):
"""
:param router: Router
:type router: rest_framework.routers.DefaultRouter
"""
router.register("shoop/category", viewset_factory(Category))
router.register("shoop/contact", viewset_factory(Contact))
router.register("shoop/order", OrderViewSet)
router.register("shoop/product", ProductViewSet)
router.register("shoop/shop", viewset_factory(Shop))
router.register("shoop/shop_product", ShopProductViewSet)
|
populate_core_api
|
identifier_name
|
forbidden-name.directive.ts
|
import { Directive, Input, OnChanges, SimpleChanges } from '@angular/core';
import { AbstractControl, NG_VALIDATORS, Validator, ValidatorFn, Validators } from '@angular/forms';
export function forbiddenNameValidator(nameRe: RegExp): ValidatorFn
|
@Directive({
selector: '[forbiddenName]',
providers: [{provide: NG_VALIDATORS, useExisting: ForbiddenValidatorDirective, multi: true}]
})
export class ForbiddenValidatorDirective implements Validator, OnChanges {
@Input() public forbiddenName: string;
private valFn = Validators.nullValidator;
public ngOnChanges(changes: SimpleChanges): void {
// const change = changes['forbiddenName'];
// if (change) {
// const val: string | RegExp = change.currentValue;
// const re = val instanceof RegExp ? val : new RegExp(val, 'i');
// this.valFn = forbiddenNameValidator(re);
// } else {
// this.valFn = Validators.nullValidator;
// }
}
public validate(control: AbstractControl): {[key: string]: any} {
return this.valFn(control);
}
}
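// Illustrative sketch (not part of the original file): the exported factory can
// also be applied to a reactive control (FormControl import from
// '@angular/forms' assumed):
//
//     const name = new FormControl('bob', forbiddenNameValidator(/bob/i));
//     name.errors; // => { forbiddenName: { name: 'bob' } }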
|
{
return (control: AbstractControl): {[key: string]: any} => {
const name = control.value;
const no = nameRe.test(name);
return no ? {forbiddenName: {name}} : null;
};
}
|
identifier_body
|
forbidden-name.directive.ts
|
import { Directive, Input, OnChanges, SimpleChanges } from '@angular/core';
import { AbstractControl, NG_VALIDATORS, Validator, ValidatorFn, Validators } from '@angular/forms';
export function forbiddenNameValidator(nameRe: RegExp): ValidatorFn {
return (control: AbstractControl): {[key: string]: any} => {
const name = control.value;
const no = nameRe.test(name);
return no ? {forbiddenName: {name}} : null;
};
}
@Directive({
selector: '[forbiddenName]',
providers: [{provide: NG_VALIDATORS, useExisting: ForbiddenValidatorDirective, multi: true}]
})
export class ForbiddenValidatorDirective implements Validator, OnChanges {
@Input() public forbiddenName: string;
private valFn = Validators.nullValidator;
public
|
(changes: SimpleChanges): void {
// const change = changes['forbiddenName'];
// if (change) {
// const val: string | RegExp = change.currentValue;
// const re = val instanceof RegExp ? val : new RegExp(val, 'i');
// this.valFn = forbiddenNameValidator(re);
// } else {
// this.valFn = Validators.nullValidator;
// }
}
public validate(control: AbstractControl): {[key: string]: any} {
return this.valFn(control);
}
}
|
ngOnChanges
|
identifier_name
|
forbidden-name.directive.ts
|
import { Directive, Input, OnChanges, SimpleChanges } from '@angular/core';
import { AbstractControl, NG_VALIDATORS, Validator, ValidatorFn, Validators } from '@angular/forms';
export function forbiddenNameValidator(nameRe: RegExp): ValidatorFn {
return (control: AbstractControl): {[key: string]: any} => {
const name = control.value;
const no = nameRe.test(name);
return no ? {forbiddenName: {name}} : null;
};
}
@Directive({
selector: '[forbiddenName]',
providers: [{provide: NG_VALIDATORS, useExisting: ForbiddenValidatorDirective, multi: true}]
})
export class ForbiddenValidatorDirective implements Validator, OnChanges {
@Input() public forbiddenName: string;
private valFn = Validators.nullValidator;
|
// const change = changes['forbiddenName'];
// if (change) {
// const val: string | RegExp = change.currentValue;
// const re = val instanceof RegExp ? val : new RegExp(val, 'i');
// this.valFn = forbiddenNameValidator(re);
// } else {
// this.valFn = Validators.nullValidator;
// }
}
public validate(control: AbstractControl): {[key: string]: any} {
return this.valFn(control);
}
}
|
public ngOnChanges(changes: SimpleChanges): void {
|
random_line_split
|
server.ts
|
// the polyfills must be the first thing imported in node.js
import 'angular2-universal-polyfills';
import * as path from 'path';
|
const mongoose = require('mongoose');
require('dotenv').config();
// Angular 2
import { enableProdMode } from '@angular/core';
// Angular 2 Universal
import { createEngine } from 'angular2-express-engine';
// App
import { config } from './server/config';
var cache = require('memory-cache');
import { MainModule } from './main.node';
// enable prod for faster renders
enableProdMode();
const app = express();
const ROOT = path.join(path.resolve(__dirname, '..'));
const routeGroup = require('./server/routes/group');
const apiRoutes = require('./server/routes/api');
app.set('superSecret', config.secret);
// Connect to database.
mongoose.Promise = global.Promise;
mongoose.connect(config.db);
mongoose.connection.on('connected', () => { console.log('MongoDB connected'); });
// Express View
app.engine('.html', createEngine({
precompile: true,
ngModule: MainModule
}));
app.set('views', __dirname);
app.set('view engine', 'html');
app.use(cookieParser('Angular 2 Universal'));
app.use(bodyParser.json());
app.use((err, req, res, next) => {
if (err instanceof SyntaxError) {
res.status(400).json({ status: 400, message: 'Invalid Request' });
res.end();
} else {
// forward other errors to Express's default error handler instead of hanging
next(err);
}
});
// Serve static files
app.use('/assets', express.static(path.join(__dirname, 'assets'), {maxAge: 30}));
app.use(express.static(path.join(ROOT, 'dist/client'), {index: false}));
function ngApp(req, res) {
let url = req.originalUrl || '/';
let html = cache.get( url );
res.setHeader('Cache-Control', 'public, max-age=30');
if ( html ){
res.status(200).send(html);
return;
} else {
res.render('index', {
req,
res,
preboot: false,
baseUrl: '/',
requestUrl: req.originalUrl,
originUrl: 'http://localhost:3000'
}, (err, html) => {
res.status(200).send(html);
cache.put(url, html);
});
}
}
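// Illustrative note (assumed memory-cache behaviour): entries stored without a
// TTL never expire, so cache.put(url, html, 30 * 1000) would keep the
// in-memory copy in step with the 30s Cache-Control header set above.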
// Routes with html5pushstate
// ensure routes match client-side-app
app.get('/', ngApp);
app.get('/page', ngApp);
app.get('/login', ngApp);
app.use('/api/', apiRoutes);
app.get('*', (req, res) => {
res.status(404).json({ status: 404, message: 'No Content' });
});
// Server
let server = app.listen(process.env.PORT || 3000, () => {
console.log(`Listening on: http://localhost:${server.address().port}`);
});
|
import * as express from 'express';
import * as bodyParser from 'body-parser';
import * as cookieParser from 'cookie-parser';
|
random_line_split
|
server.ts
|
// the polyfills must be the first thing imported in node.js
import 'angular2-universal-polyfills';
import * as path from 'path';
import * as express from 'express';
import * as bodyParser from 'body-parser';
import * as cookieParser from 'cookie-parser';
const mongoose = require('mongoose');
require('dotenv').config();
// Angular 2
import { enableProdMode } from '@angular/core';
// Angular 2 Universal
import { createEngine } from 'angular2-express-engine';
// App
import { config } from './server/config';
var cache = require('memory-cache');
import { MainModule } from './main.node';
// enable prod for faster renders
enableProdMode();
const app = express();
const ROOT = path.join(path.resolve(__dirname, '..'));
const routeGroup = require('./server/routes/group');
const apiRoutes = require('./server/routes/api');
app.set('superSecret', config.secret);
// Connect to database.
mongoose.Promise = global.Promise;
mongoose.connect(config.db);
mongoose.connection.on('connected', () => { console.log('MongoDB connected'); });
// Express View
app.engine('.html', createEngine({
precompile: true,
ngModule: MainModule
}));
app.set('views', __dirname);
app.set('view engine', 'html');
app.use(cookieParser('Angular 2 Universal'));
app.use(bodyParser.json());
app.use((err, req, res, next) => {
if (err instanceof SyntaxError) {
res.status(400).json({ status: 400, message: 'Invalid Request' });
res.end();
} else {
// forward other errors to Express's default error handler instead of hanging
next(err);
}
});
// Serve static files
app.use('/assets', express.static(path.join(__dirname, 'assets'), {maxAge: 30}));
app.use(express.static(path.join(ROOT, 'dist/client'), {index: false}));
function ngApp(req, res) {
let url = req.originalUrl || '/';
let html = cache.get( url );
res.setHeader('Cache-Control', 'public, max-age=30');
if ( html )
|
else {
res.render('index', {
req,
res,
preboot: false,
baseUrl: '/',
requestUrl: req.originalUrl,
originUrl: 'http://localhost:3000'
}, (err, html) => {
res.status(200).send(html);
cache.put(url, html);
});
}
}
// Routes with html5pushstate
// ensure routes match client-side-app
app.get('/', ngApp);
app.get('/page', ngApp);
app.get('/login', ngApp);
app.use('/api/', apiRoutes);
app.get('*', (req, res) => {
res.status(404).json({ status: 404, message: 'No Content' });
});
// Server
let server = app.listen(process.env.PORT || 3000, () => {
console.log(`Listening on: http://localhost:${server.address().port}`);
});
|
{
res.status(200).send(html);
return;
}
|
conditional_block
|
server.ts
|
// the polyfills must be the first thing imported in node.js
import 'angular2-universal-polyfills';
import * as path from 'path';
import * as express from 'express';
import * as bodyParser from 'body-parser';
import * as cookieParser from 'cookie-parser';
const mongoose = require('mongoose');
require('dotenv').config();
// Angular 2
import { enableProdMode } from '@angular/core';
// Angular 2 Universal
import { createEngine } from 'angular2-express-engine';
// App
import { config } from './server/config';
var cache = require('memory-cache');
import { MainModule } from './main.node';
// enable prod for faster renders
enableProdMode();
const app = express();
const ROOT = path.join(path.resolve(__dirname, '..'));
const routeGroup = require('./server/routes/group');
const apiRoutes = require('./server/routes/api');
app.set('superSecret', config.secret);
// Connect to database.
mongoose.Promise = global.Promise;
mongoose.connect(config.db);
mongoose.connection.on('connected', () => { console.log('MongoDB connected'); });
// Express View
app.engine('.html', createEngine({
precompile: true,
ngModule: MainModule
}));
app.set('views', __dirname);
app.set('view engine', 'html');
app.use(cookieParser('Angular 2 Universal'));
app.use(bodyParser.json());
app.use((err, req, res, next) => {
if (err instanceof SyntaxError) {
res.status(400).json({ status: 400, message: 'Invalid Request' });
res.end();
}
});
// Serve static files
app.use('/assets', express.static(path.join(__dirname, 'assets'), {maxAge: 30}));
app.use(express.static(path.join(ROOT, 'dist/client'), {index: false}));
function
|
(req, res) {
let url = req.originalUrl || '/';
let html = cache.get( url );
res.setHeader('Cache-Control', 'public, max-age=30');
if ( html ){
res.status(200).send(html);
return;
} else {
res.render('index', {
req,
res,
preboot: false,
baseUrl: '/',
requestUrl: req.originalUrl,
originUrl: 'http://localhost:3000'
}, (err, html) => {
res.status(200).send(html);
cache.put(url, html);
});
}
}
// Routes with html5pushstate
// ensure routes match client-side-app
app.get('/', ngApp);
app.get('/page', ngApp);
app.get('/login', ngApp);
app.use('/api/', apiRoutes);
app.get('*', (req, res) => {
res.status(404).json({ status: 404, message: 'No Content' });
});
// Server
let server = app.listen(process.env.PORT || 3000, () => {
console.log(`Listening on: http://localhost:${server.address().port}`);
});
|
ngApp
|
identifier_name
|
server.ts
|
// the polyfills must be the first thing imported in node.js
import 'angular2-universal-polyfills';
import * as path from 'path';
import * as express from 'express';
import * as bodyParser from 'body-parser';
import * as cookieParser from 'cookie-parser';
const mongoose = require('mongoose');
require('dotenv').config();
// Angular 2
import { enableProdMode } from '@angular/core';
// Angular 2 Universal
import { createEngine } from 'angular2-express-engine';
// App
import { config } from './server/config';
var cache = require('memory-cache');
import { MainModule } from './main.node';
// enable prod for faster renders
enableProdMode();
const app = express();
const ROOT = path.join(path.resolve(__dirname, '..'));
const routeGroup = require('./server/routes/group');
const apiRoutes = require('./server/routes/api');
app.set('superSecret', config.secret);
// Connect to database.
mongoose.Promise = global.Promise;
mongoose.connect(config.db);
mongoose.connection.on('connected', () => { console.log('MongoDB connected'); });
// Express View
app.engine('.html', createEngine({
precompile: true,
ngModule: MainModule
}));
app.set('views', __dirname);
app.set('view engine', 'html');
app.use(cookieParser('Angular 2 Universal'));
app.use(bodyParser.json());
app.use((err, req, res, next) => {
if (err instanceof SyntaxError) {
res.status(400).json({ status: 400, message: 'Invalid Request' });
res.end();
}
});
// Serve static files
app.use('/assets', express.static(path.join(__dirname, 'assets'), {maxAge: 30}));
app.use(express.static(path.join(ROOT, 'dist/client'), {index: false}));
function ngApp(req, res)
|
// Routes with html5pushstate
// ensure routes match client-side-app
app.get('/', ngApp);
app.get('/page', ngApp);
app.get('/login', ngApp);
app.use('/api/', apiRoutes);
app.get('*', (req, res) => {
res.status(404).json({ status: 404, message: 'No Content' });
});
// Server
let server = app.listen(process.env.PORT || 3000, () => {
console.log(`Listening on: http://localhost:${server.address().port}`);
});
|
{
let url = req.originalUrl || '/';
let html = cache.get( url );
res.setHeader('Cache-Control', 'public, max-age=30');
if ( html ){
res.status(200).send(html);
return;
} else {
res.render('index', {
req,
res,
preboot: false,
baseUrl: '/',
requestUrl: req.originalUrl,
originUrl: 'http://localhost:3000'
}, (err, html) => {
res.status(200).send(html);
cache.put(url, html);
});
}
}
|
identifier_body
|
interwebs.py
|
"""
A simple HTTP interface for making GET, PUT and POST requests.
"""
import http.client
import json
from urllib.parse import urlparse, urlencode # NOQA
from base64 import b64encode
from functools import partial
from collections import namedtuple
Response = namedtuple("Response", ("payload", "headers", "status", "is_json"))
def request(verb, host, port, path, payload=None, https=False, headers=None, auth=None, redirect=True):
"""
Make an HTTP(S) request with the provided HTTP verb, host FQDN, port number, path,
payload, protocol, headers, and auth information. Return a response object with
payload, headers, JSON flag, and HTTP status number.
"""
if not headers:
headers = {}
headers["User-Agent"] = "GitSavvy Sublime Plug-in"
if auth:
username_password = "{}:{}".format(*auth).encode("ascii")
headers["Authorization"] = "Basic {}".format(b64encode(username_password).decode("ascii"))
connection = (http.client.HTTPSConnection(host, port)
if https
else http.client.HTTPConnection(host, port))
connection.request(verb, path, body=payload, headers=headers)
response = connection.getresponse()
response_payload = response.read()
response_headers = dict(response.getheaders())
status = response.status
is_json = "application/json" in response_headers["Content-Type"]
if is_json:
response_payload = json.loads(response_payload.decode("utf-8"))
response.close()
connection.close()
if redirect and verb == "GET" and (status == 301 or status == 302):
|
)
return Response(response_payload, response_headers, status, is_json)
def request_url(verb, url, payload=None, headers=None, auth=None):
parsed = urlparse(url)
https = parsed.scheme == "https"
return request(
verb,
parsed.hostname,
parsed.port or 443 if https else 80,
parsed.path,
payload=payload,
https=https,
headers=headers,
auth=([parsed.username, parsed.password]
if parsed.username and parsed.password
else None)
)
get = partial(request, "GET")
post = partial(request, "POST")
put = partial(request, "PUT")
get_url = partial(request_url, "GET")
post_url = partial(request_url, "POST")
put_url = partial(request_url, "PUT")
|
return request_url(
verb,
response_headers["Location"],
headers=headers,
auth=auth
|
random_line_split
|
interwebs.py
|
"""
A simple HTTP interface for making GET, PUT and POST requests.
"""
import http.client
import json
from urllib.parse import urlparse, urlencode # NOQA
from base64 import b64encode
from functools import partial
from collections import namedtuple
Response = namedtuple("Response", ("payload", "headers", "status", "is_json"))
def request(verb, host, port, path, payload=None, https=False, headers=None, auth=None, redirect=True):
"""
Make an HTTP(S) request with the provided HTTP verb, host FQDN, port number, path,
payload, protocol, headers, and auth information. Return a response object with
payload, headers, JSON flag, and HTTP status number.
"""
if not headers:
headers = {}
headers["User-Agent"] = "GitSavvy Sublime Plug-in"
if auth:
username_password = "{}:{}".format(*auth).encode("ascii")
headers["Authorization"] = "Basic {}".format(b64encode(username_password).decode("ascii"))
connection = (http.client.HTTPSConnection(host, port)
if https
else http.client.HTTPConnection(host, port))
connection.request(verb, path, body=payload, headers=headers)
response = connection.getresponse()
response_payload = response.read()
response_headers = dict(response.getheaders())
status = response.status
is_json = "application/json" in response_headers["Content-Type"]
if is_json:
response_payload = json.loads(response_payload.decode("utf-8"))
response.close()
connection.close()
if redirect and verb == "GET" and (status == 301 or status == 302):
return request_url(
verb,
response_headers["Location"],
headers=headers,
auth=auth
)
return Response(response_payload, response_headers, status, is_json)
def
|
(verb, url, payload=None, headers=None, auth=None):
parsed = urlparse(url)
https = parsed.scheme == "https"
return request(
verb,
parsed.hostname,
parsed.port or 443 if https else 80,
parsed.path,
payload=payload,
https=https,
headers=headers,
auth=([parsed.username, parsed.password]
if parsed.username and parsed.password
else None)
)
get = partial(request, "GET")
post = partial(request, "POST")
put = partial(request, "PUT")
get_url = partial(request_url, "GET")
post_url = partial(request_url, "POST")
put_url = partial(request_url, "PUT")
|
request_url
|
identifier_name
|
interwebs.py
|
"""
A simple HTTP interface for making GET, PUT and POST requests.
"""
import http.client
import json
from urllib.parse import urlparse, urlencode # NOQA
from base64 import b64encode
from functools import partial
from collections import namedtuple
Response = namedtuple("Response", ("payload", "headers", "status", "is_json"))
def request(verb, host, port, path, payload=None, https=False, headers=None, auth=None, redirect=True):
"""
Make an HTTP(S) request with the provided HTTP verb, host FQDN, port number, path,
payload, protocol, headers, and auth information. Return a response object with
payload, headers, JSON flag, and HTTP status number.
"""
if not headers:
headers = {}
headers["User-Agent"] = "GitSavvy Sublime Plug-in"
if auth:
username_password = "{}:{}".format(*auth).encode("ascii")
headers["Authorization"] = "Basic {}".format(b64encode(username_password).decode("ascii"))
connection = (http.client.HTTPSConnection(host, port)
if https
else http.client.HTTPConnection(host, port))
connection.request(verb, path, body=payload, headers=headers)
response = connection.getresponse()
response_payload = response.read()
response_headers = dict(response.getheaders())
status = response.status
is_json = "application/json" in response_headers["Content-Type"]
if is_json:
response_payload = json.loads(response_payload.decode("utf-8"))
response.close()
connection.close()
if redirect and verb == "GET" and (status == 301 or status == 302):
|
return Response(response_payload, response_headers, status, is_json)
def request_url(verb, url, payload=None, headers=None, auth=None):
parsed = urlparse(url)
https = parsed.scheme == "https"
return request(
verb,
parsed.hostname,
parsed.port or 443 if https else 80,
parsed.path,
payload=payload,
https=https,
headers=headers,
auth=([parsed.username, parsed.password]
if parsed.username and parsed.password
else None)
)
get = partial(request, "GET")
post = partial(request, "POST")
put = partial(request, "PUT")
get_url = partial(request_url, "GET")
post_url = partial(request_url, "POST")
put_url = partial(request_url, "PUT")
|
return request_url(
verb,
response_headers["Location"],
headers=headers,
auth=auth
)
|
conditional_block
|
interwebs.py
|
"""
A simple HTTP interface for making GET, PUT and POST requests.
"""
import http.client
import json
from urllib.parse import urlparse, urlencode # NOQA
from base64 import b64encode
from functools import partial
from collections import namedtuple
Response = namedtuple("Response", ("payload", "headers", "status", "is_json"))
def request(verb, host, port, path, payload=None, https=False, headers=None, auth=None, redirect=True):
"""
Make an HTTP(S) request with the provided HTTP verb, host FQDN, port number, path,
payload, protocol, headers, and auth information. Return a response object with
payload, headers, JSON flag, and HTTP status number.
"""
if not headers:
headers = {}
headers["User-Agent"] = "GitSavvy Sublime Plug-in"
if auth:
username_password = "{}:{}".format(*auth).encode("ascii")
headers["Authorization"] = "Basic {}".format(b64encode(username_password).decode("ascii"))
connection = (http.client.HTTPSConnection(host, port)
if https
else http.client.HTTPConnection(host, port))
connection.request(verb, path, body=payload, headers=headers)
response = connection.getresponse()
response_payload = response.read()
response_headers = dict(response.getheaders())
status = response.status
is_json = "application/json" in response_headers["Content-Type"]
if is_json:
response_payload = json.loads(response_payload.decode("utf-8"))
response.close()
connection.close()
if redirect and verb == "GET" and (status == 301 or status == 302):
return request_url(
verb,
response_headers["Location"],
headers=headers,
auth=auth
)
return Response(response_payload, response_headers, status, is_json)
def request_url(verb, url, payload=None, headers=None, auth=None):
|
get = partial(request, "GET")
post = partial(request, "POST")
put = partial(request, "PUT")
get_url = partial(request_url, "GET")
post_url = partial(request_url, "POST")
put_url = partial(request_url, "PUT")
|
parsed = urlparse(url)
https = parsed.scheme == "https"
return request(
verb,
parsed.hostname,
parsed.port or 443 if https else 80,
parsed.path,
payload=payload,
https=https,
headers=headers,
auth=([parsed.username, parsed.password]
if parsed.username and parsed.password
else None)
)
|
identifier_body
|
ObjectFactory.js
|
var inherit = require('./inherit'),
Sprite = require('../display/Sprite'),
Tilemap = require('../tilemap/Tilemap'),
Rectangle = require('../geom/Rectangle'),
BitmapText = require('../text/BitmapText');
/**
* The object factory makes it simple to create and add objects to a parent. One is added
* to a State's world and camera by default, but a factory can be used with any parent,
* though it can only belong to a single state.
*
* @class ObjectFactory
* @extends Object
* @constructor
* @param state {State} The game state this factory belongs to
* @param parent {Container} The container to act as the parent for created objects
*/
var ObjectFactory = function(state, parent) {
this.state = state;
this.game = state.game;
this.parent = parent;
};
inherit(ObjectFactory, Object, {
/**
* Adds a generic object to the world or camera
*
* @method obj
* @param object {mixed} Any game object you want to add to the parent
* @return {mixed} Returns the added object
*/
obj: function(obj) {
return this.parent.addChild(obj);
},
/**
* Creates a new sprite and adds it to the game world
*
* @method sprite
* @param texture {String|Texture} The texture for the sprite, or the key for one in the cache
* @param [frame=null] {String|Number} A specific frame of a sprite sheet to use, either the index or string key
* depending on the type of the sheet when loaded.
* @param [physics=true] {Boolean} Should this sprite be added to the physics simulation?
* @return {Sprite} The sprite added
*/
sprite: function(tx, frame, physics) {
var spr,
game = this.game;
if(typeof tx === 'string') {
if(frame || frame === 0)
tx = game.cache.getTextures(tx)[frame];
else
tx = game.cache.getTexture(tx);
}
if(!tx) {
tx = game.cache.getTexture('__default');
}
spr = new Sprite(tx);
//if undefined, then default to true
if(physics || physics === undefined) {
spr.enablePhysics(this.state.physics);
//this.state.physics.addSprite(spr);
}
return this.parent.addChild(spr);
},
/**
* Creates a new AudioPlayer to play the sound passed in
*
* @method audio
* @param key {String} The unique cache key for the preloaded audio
* @param [settings] {Object} All the settings for the audio player (see AudioManager.add for all settings)
* @return {AudioPlayer} The player added
*/
audio: function(key, settings) {
return this.state.audio.add(key, settings);
},
/**
* Creates a new tilemap to add to the world
*
* @method tilemap
* @param key {String} The unique cache key for the preloaded tilemap data
* @param [constrain=true] {Boolean} Should the camera be constrained to this tilemap's size?
* @return {Tilemap} The tilemap added
*/
tilemap: function(key, constrain) {
var obj = this.game.cache.getTilemap(key) || {},
tilemap = new Tilemap(this.state, obj.data, obj.textures);
if(constrain) {
this.state.camera.constrain(new Rectangle(0, 0, tilemap.realSize.x, tilemap.realSize.y));
}
//force render of tilemap
tilemap.render(
-this.state.world.position.x,
-this.state.world.position.y,
this.game.width,
this.game.height
);
tilemap._cachekey = key;
return this.parent.addChild(tilemap);
},
/**
* Creates a new instance of BitmapText
*
* @method bitmaptext
* @param text {String} The text for the BitmapText to display
* @param font {String} The key for the bitmap font loaded into the cache
* @param [style] {Object} The style settings to apply to the text
* @return {BitmapText} The bitmap text object added
*/
bitmaptext: function(text, font, style) {
if(typeof font === 'string')
|
return this.parent.addChild(new BitmapText(text, font, style));
}
});
module.exports = ObjectFactory;
|
font = this.game.cache.getBitmapFont(font);
|
random_line_split
|